Dataset schema:

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

Rows follow in the column order above, with cells separated by `|`; the `content` cell holds the full source file.
4a1aea6fbd848c726e5b5cdb1953e414611ea0a6 | 5,954 | py | Python | models/user.py | jungor/android_final_project | 28fe804912d72935066c507ed2f2a5d05bf8eccc | ["MIT"] | null | null | null | models/user.py | jungor/android_final_project | 28fe804912d72935066c507ed2f2a5d05bf8eccc | ["MIT"] | 1 | 2015-12-05T11:25:59.000Z | 2015-12-05T11:25:59.000Z | models/user.py | jungor/android_final_project | 28fe804912d72935066c507ed2f2a5d05bf8eccc | ["MIT"] | null | null | null |
# -*-coding:utf-8 -*-
import base64
import os
import re
from datetime import datetime
from consts import *
from bson.objectid import ObjectId
from db import get_db
IMG_DIR = os.path.join(os.path.dirname(__file__), os.pardir, "static", "img", "user")
class User(object):
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, value):
if value:
self._uid = value
else:
raise ValueError("You did not provide valid uid")
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if value:
self._name = value
else:
raise ValueError("You did not provide valid name")
@property
def pwd(self):
return self._pwd
@pwd.setter
def pwd(self, value):
if value:
self._pwd = value
else:
raise ValueError("You did not provide valid pwd")
@property
def sex(self):
return self._sex
@sex.setter
def sex(self, value):
try:
value = int(value)
if value in xrange(2):
self._sex = value
else:
raise ValueError("You did not provide valid sex")
except ValueError, e:
raise e
@property
def major(self):
return self._major
@major.setter
def major(self, value):
try:
value = int(value)
if value in xrange(999):
self._major = value
else:
raise ValueError("You did not provide valid major")
except ValueError, e:
raise e
@property
def grade(self):
return self._grade
@grade.setter
def grade(self, value):
try:
value = int(value)
if value in xrange(4):
self._grade = value
else:
raise ValueError("You did not provide valid grade")
except ValueError, e:
raise e
@property
def avatar_url(self):
return self._avatar_url
@avatar_url.setter
def avatar_url(self, value):
if value:
value = base64.b64decode(value)
now = re.sub(r'[ :.]', '-', str(datetime.now()))
path = os.path.join(IMG_DIR, str(self.name) + "_avatar" + now + ".png")
avatar_file = open(path, 'wb')
avatar_file.write(value)
self._avatar_url = SERVER_ROOT_URL + '/static/' + '/'.join(path.split('/')[-3:])
avatar_file.close()
else:
raise ValueError("You did not provide valid avatar")
def __init__(self, name, pwd, sex, major, grade, avatar=None, uid=None, **kwargs):
super(User, self).__init__()
self._uid = None
self._name = None
self._pwd = None
self._sex = None
self._major = None
self._grade = None
self._avatar_url = SERVER_ROOT_URL + "/static/img/user/default_avatar.png"
self._new_psw = None
self.name = name
self.pwd = pwd
self.sex = sex
self.major = major
self.grade = grade
if avatar:
self.avatar_url = avatar
if uid:
self.uid = uid
def save(self):
db = get_db()
if self.uid:
db["Users"].update(
{
"_id": ObjectId(self.uid),
},
{
"name": self.name,
"pwd": self.pwd,
"sex": self.sex,
"major": self.major,
"grade": self.grade,
"avatar_url": self.avatar_url,
},
)
else:
uid_obj = db["Users"].insert(
{
"name": self.name,
"pwd": self.pwd,
"sex": self.sex,
"major": self.major,
"grade": self.grade,
"like": [],
"collect": [],
"avatar_url": self.avatar_url,
}
)
self.uid = str(uid_obj)
@classmethod
def is_name_exist(cls, name):
db = get_db()
doc = db["Users"].find_one({"name": name})
if doc:
return True
else:
return False
@classmethod
def authenticate(cls, name, pwd):
if cls.is_name_exist(name):
db = get_db()
doc = db["Users"].find_one({"name": name, "pwd": pwd})
return doc
else:
return None
@classmethod
def get(cls, uid):
db = get_db()
doc = db["Users"].find_one({"_id": ObjectId(uid)})
if doc:
print doc
u = cls(**doc)
u._avatar_url = doc["avatar_url"]
u.uid = uid
return u
else:
return None
@classmethod
def like(cls, uid, aid):
db = get_db()
db["Users"].update(
{"_id": ObjectId(uid)},
{"$addToSet": {"like": aid}}
)
@classmethod
def unlike(cls, uid, aid):
db = get_db()
db["Users"].update(
{"_id": ObjectId(uid)},
{"$pull": {"like": aid}}
)
@classmethod
def collect(cls, uid, aid):
db = get_db()
db["Users"].update(
{"_id": ObjectId(uid)},
{"$addToSet": {"collect": aid}}
)
@classmethod
def uncollect(cls, uid, aid):
db = get_db()
db["Users"].update(
{"_id": ObjectId(uid)},
{"$pull": {"collect": aid}}
)
@classmethod
def reset(cls):
db = get_db()
db["Users"].remove({})
if __name__ == "__main__":
pass
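# Usage sketch (not part of the original file): assumes a reachable MongoDB behind
# get_db() and that SERVER_ROOT_URL is defined in consts; the field values are illustrative.
#
#   u = User(name="alice", pwd="secret", sex=0, major=101, grade=2)
#   u.save()                        # inserts a new document and fills in u.uid
#   User.like(u.uid, "article42")   # adds an article id to the user's "like" list
#   same_user = User.get(u.uid)     # reads the document back as a User instance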
| 25.663793 | 92 | 0.466913 |
4a1aea710154c40b5236f835aee4867d3f950469 | 3,392 | py | Python | follow.py | ligongzzz/AC-SJTU-OJ | a6e3bffd1c025ac114dd9a6ebec9bafbf0b5360a | ["MIT"] | 1 | 2019-08-11T15:11:07.000Z | 2019-08-11T15:11:07.000Z | follow.py | ligongzzz/AC-SJTU-OJ | a6e3bffd1c025ac114dd9a6ebec9bafbf0b5360a | ["MIT"] | null | null | null | follow.py | ligongzzz/AC-SJTU-OJ | a6e3bffd1c025ac114dd9a6ebec9bafbf0b5360a | ["MIT"] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
import time
import logging
logging.basicConfig(filename="test.log", filemode="w", format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
                    datefmt="%d-%m-%Y %H:%M:%S", level=logging.INFO)
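# The script below logs into the SJTU ACM Online Judge, polls the status page for new
# submissions from a specific user ('魔卡少女'), fetches the matching solution from a
# GitHub mirror, and resubmits it under the logged-in account.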
# Hyper Parameters
SLEEP_TIME = 3
print('Problem tracker: follows the problems other users solve on the OJ')
username = input('Your username: ')
password = input('Your password: ')
problem_to_solve = None
option = webdriver.ChromeOptions()
option.add_argument('--headless')
option.add_argument('--no-sandbox')
browser = webdriver.Chrome(options=option)
last_code = None
print('Logging in...')
browser.get("https://acm.sjtu.edu.cn/OnlineJudge")
input_username = browser.find_element(By.NAME, 'username')
input_password = browser.find_element(By.NAME, 'password')
btn_login = browser.find_element(By.NAME, 'action')
actions = ActionChains(browser)
actions.send_keys_to_element(input_username, username)
actions.send_keys_to_element(input_password, password)
actions.click(btn_login)
actions.perform()
print('Logged in successfully!')
logging.info('Login complete')
while True:
try:
browser.get('https://acm.sjtu.edu.cn/OnlineJudge/status#')
while True:
time.sleep(SLEEP_TIME)
browser.refresh()
print('Fetching the latest submission id......')
cur_code = browser.find_element(
By.XPATH, '//*[@id="status"]/tbody/tr[1]/td[1]/a').text
if last_code != cur_code:
last_code = cur_code
user_submit: str = browser.find_element(
By.XPATH, '//*[@id="status"]/tbody/tr[1]/td[2]').text
[user_id, _] = user_submit.split()
print('The submitting user is', user_id)
if user_id == username:
print('No new submissions')
logging.warning('No new submissions')
continue
elif user_id != '魔卡少女':
print('Not a submission from 魔卡少女')
logging.info('Found a submission from a non-target user')
continue
problem_to_solve = browser.find_element(
By.XPATH, '//*[@id="status"]/tbody/tr[1]/td[3]/a[1]').text
print('Found a new submission, problem id:', problem_to_solve)
logging.info('Found a new submission, problem id: ' + problem_to_solve)
break
else:
print('No new submissions')
logging.warning('No new submissions')
print("Fetching the code......")
browser.get(
'https://raw.githubusercontent.com/ligongzzz/SJTU-OJ/master/Code/Project'
+ problem_to_solve + '/Project' + problem_to_solve + '/源.cpp')
print('Code fetched successfully!')
code_to_input = browser.find_element(By.XPATH, '/html/body/pre').text
print(code_to_input)
print('Submitting the code......')
browser.get("https://acm.sjtu.edu.cn/OnlineJudge/submit")
input_problem = browser.find_element(By.NAME, 'problem')
input_code = browser.find_element(By.NAME, 'code')
btn_submit = browser.find_element(
By.XPATH, '//*[@id="wrap"]/div/form/fieldset/div[4]/button')
actions = ActionChains(browser)
actions.send_keys_to_element(input_problem, problem_to_solve)
actions.send_keys_to_element(input_code, code_to_input)
actions.click(btn_submit)
actions.perform()
print('Submission complete')
browser.refresh()
except Exception as err:
print('Sorry! An error occurred:', err)
| 33.584158 | 111 | 0.6023 |
4a1aea946a5b4bcc07a470c873889b850e38bd82 | 14,026 | py | Python | networkx-1.8.1/networkx/readwrite/edgelist.py | lthurlow/Boolean-Constrained-Routing | d60a648d17d5dadaa0fb5ef9961fcea9d9784dfe | ["MIT"] | 12 | 2015-03-25T20:20:26.000Z | 2021-11-14T19:44:56.000Z | networkx-1.8.1/networkx/readwrite/edgelist.py | lthurlow/Boolean-Constrained-Routing | d60a648d17d5dadaa0fb5ef9961fcea9d9784dfe | ["MIT"] | 71 | 2015-01-05T16:50:55.000Z | 2020-09-30T19:17:47.000Z | networkx-1.8.1/networkx/readwrite/edgelist.py | lthurlow/Boolean-Constrained-Routing | d60a648d17d5dadaa0fb5ef9961fcea9d9784dfe | ["MIT"] | 14 | 2015-02-15T22:19:18.000Z | 2020-09-30T18:54:54.000Z |
"""
**********
Edge Lists
**********
Read and write NetworkX graphs as edge lists.
The edge list format is useful for graphs with nodes
that can be meaningfully represented as strings. With the edgelist
format simple edge data can be stored but node or graph data is not.
There is no way of representing isolated nodes unless the node has a
self-loop edge.
Format
------
You can read or write three formats of edge lists with these functions.
Node pairs with no data::
1 2
Python dictionary as data::
1 2 {'weight':7, 'color':'green'}
Arbitrary data::
1 2 7 green
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['generate_edgelist',
'write_edgelist',
'parse_edgelist',
'read_edgelist',
'read_weighted_edgelist',
'write_weighted_edgelist']
from networkx.utils import open_file, make_str
import networkx as nx
def generate_edgelist(G, delimiter=' ', data=True):
"""Generate a single line of the graph G in edge list format.
Parameters
----------
G : NetworkX graph
delimiter : string, optional
Separator for node labels
data : bool or list of keys
If False generate no edge data. If True use a dictionary
representation of edge data. If a list of keys use a list of data
values corresponding to the keys.
Returns
-------
lines : string
Lines of data in adjlist format.
Examples
--------
>>> G = nx.lollipop_graph(4, 3)
>>> G[1][2]['weight'] = 3
>>> G[3][4]['capacity'] = 12
>>> for line in nx.generate_edgelist(G, data=False):
... print(line)
0 1
0 2
0 3
1 2
1 3
2 3
3 4
4 5
5 6
>>> for line in nx.generate_edgelist(G):
... print(line)
0 1 {}
0 2 {}
0 3 {}
1 2 {'weight': 3}
1 3 {}
2 3 {}
3 4 {'capacity': 12}
4 5 {}
5 6 {}
>>> for line in nx.generate_edgelist(G,data=['weight']):
... print(line)
0 1
0 2
0 3
1 2 3
1 3
2 3
3 4
4 5
5 6
See Also
--------
write_adjlist, read_adjlist
"""
if data is True or data is False:
for e in G.edges(data=data):
yield delimiter.join(map(make_str,e))
else:
for u,v,d in G.edges(data=True):
e=[u,v]
try:
e.extend(d[k] for k in data)
except KeyError:
pass # missing data for this edge, should warn?
yield delimiter.join(map(make_str,e))
@open_file(1,mode='wb')
def write_edgelist(G, path, comments="#", delimiter=' ', data=True,
encoding = 'utf-8'):
"""Write graph as a list of edges.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
The string used to separate values. The default is whitespace.
data : bool or list, optional
If False write no edge data.
If True write a string representation of the edge data dictionary..
If a list (or other iterable) is provided, write the keys specified
in the list.
encoding: string, optional
Specify which encoding to use when writing file.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_edgelist(G, "test.edgelist")
>>> G=nx.path_graph(4)
>>> fh=open("test.edgelist",'wb')
>>> nx.write_edgelist(G, fh)
>>> nx.write_edgelist(G, "test.edgelist.gz")
>>> nx.write_edgelist(G, "test.edgelist.gz", data=False)
>>> G=nx.Graph()
>>> G.add_edge(1,2,weight=7,color='red')
>>> nx.write_edgelist(G,'test.edgelist',data=False)
>>> nx.write_edgelist(G,'test.edgelist',data=['color'])
>>> nx.write_edgelist(G,'test.edgelist',data=['color','weight'])
See Also
--------
write_edgelist()
write_weighted_edgelist()
"""
for line in generate_edgelist(G, delimiter, data):
line+='\n'
path.write(line.encode(encoding))
def parse_edgelist(lines, comments='#', delimiter=None,
create_using=None, nodetype=None, data=True):
"""Parse lines of an edge list representation of a graph.
Parameters
----------
lines : list or iterator of strings
Input data in edgelist format
comments : string, optional
Marker for comment lines
delimiter : string, optional
Separator for node labels
create_using: NetworkX graph container, optional
Use given NetworkX graph for holding nodes or edges.
nodetype : Python type, optional
Convert nodes to this type.
data : bool or list of (label,type) tuples
If False generate no edge data or if True use a dictionary
representation of edge data or a list tuples specifying dictionary
key names and types for edge data.
Returns
-------
G: NetworkX Graph
The graph corresponding to lines
Examples
--------
Edgelist with no data:
>>> lines = ["1 2",
... "2 3",
... "3 4"]
>>> G = nx.parse_edgelist(lines, nodetype = int)
>>> G.nodes()
[1, 2, 3, 4]
>>> G.edges()
[(1, 2), (2, 3), (3, 4)]
Edgelist with data in Python dictionary representation:
>>> lines = ["1 2 {'weight':3}",
... "2 3 {'weight':27}",
... "3 4 {'weight':3.0}"]
>>> G = nx.parse_edgelist(lines, nodetype = int)
>>> G.nodes()
[1, 2, 3, 4]
>>> G.edges(data = True)
[(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})]
Edgelist with data in a list:
>>> lines = ["1 2 3",
... "2 3 27",
... "3 4 3.0"]
>>> G = nx.parse_edgelist(lines, nodetype = int, data=(('weight',float),))
>>> G.nodes()
[1, 2, 3, 4]
>>> G.edges(data = True)
[(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})]
See Also
--------
read_weighted_edgelist
"""
from ast import literal_eval
if create_using is None:
G=nx.Graph()
else:
try:
G=create_using
G.clear()
except:
raise TypeError("create_using input is not a NetworkX graph type")
for line in lines:
p=line.find(comments)
if p>=0:
line = line[:p]
if not len(line):
continue
# split line, should have 2 or more
s=line.strip().split(delimiter)
if len(s)<2:
continue
u=s.pop(0)
v=s.pop(0)
d=s
if nodetype is not None:
try:
u=nodetype(u)
v=nodetype(v)
except:
raise TypeError("Failed to convert nodes %s,%s to type %s."
%(u,v,nodetype))
if len(d)==0 or data is False:
# no data or data type specified
edgedata={}
elif data is True:
# no edge types specified
try: # try to evaluate as dictionary
edgedata=dict(literal_eval(' '.join(d)))
except:
raise TypeError(
"Failed to convert edge data (%s) to dictionary."%(d))
else:
# convert edge data to dictionary with specified keys and type
if len(d)!=len(data):
raise IndexError(
"Edge data %s and data_keys %s are not the same length"%
(d, data))
edgedata={}
for (edge_key,edge_type),edge_value in zip(data,d):
try:
edge_value=edge_type(edge_value)
except:
raise TypeError(
"Failed to convert %s data %s to type %s."
%(edge_key, edge_value, edge_type))
edgedata.update({edge_key:edge_value})
G.add_edge(u, v, attr_dict=edgedata)
return G
@open_file(0,mode='rb')
def read_edgelist(path, comments="#", delimiter=None, create_using=None,
nodetype=None, data=True, edgetype=None, encoding='utf-8'):
"""Read a graph from a list of edges.
Parameters
----------
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'rb' mode.
Filenames ending in .gz or .bz2 will be uncompressed.
comments : string, optional
The character used to indicate the start of a comment.
delimiter : string, optional
The string used to separate values. The default is whitespace.
create_using : Graph container, optional,
Use specified container to build graph. The default is networkx.Graph,
an undirected graph.
nodetype : int, float, str, Python type, optional
Convert node data from strings to specified type
data : bool or list of (label,type) tuples
Tuples specifying dictionary key names and types for edge data
edgetype : int, float, str, Python type, optional OBSOLETE
Convert edge data from strings to specified type and use as 'weight'
encoding: string, optional
Specify which encoding to use when reading file.
Returns
-------
G : graph
A networkx Graph or other type specified with create_using
Examples
--------
>>> nx.write_edgelist(nx.path_graph(4), "test.edgelist")
>>> G=nx.read_edgelist("test.edgelist")
>>> fh=open("test.edgelist", 'rb')
>>> G=nx.read_edgelist(fh)
>>> fh.close()
>>> G=nx.read_edgelist("test.edgelist", nodetype=int)
>>> G=nx.read_edgelist("test.edgelist",create_using=nx.DiGraph())
Edgelist with data in a list:
>>> textline = '1 2 3'
>>> fh = open('test.edgelist','w')
>>> d = fh.write(textline)
>>> fh.close()
>>> G = nx.read_edgelist('test.edgelist', nodetype=int, data=(('weight',float),))
>>> G.nodes()
[1, 2]
>>> G.edges(data = True)
[(1, 2, {'weight': 3.0})]
See parse_edgelist() for more examples of formatting.
See Also
--------
parse_edgelist
Notes
-----
Since nodes must be hashable, the function nodetype must return hashable
types (e.g. int, float, str, frozenset - or tuples of those, etc.)
"""
lines = (line.decode(encoding) for line in path)
return parse_edgelist(lines,comments=comments, delimiter=delimiter,
create_using=create_using, nodetype=nodetype,
data=data)
def write_weighted_edgelist(G, path, comments="#",
delimiter=' ', encoding='utf-8'):
"""Write graph G as a list of edges with numeric weights.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'wb' mode.
Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
The string used to separate values. The default is whitespace.
encoding: string, optional
Specify which encoding to use when writing file.
Examples
--------
>>> G=nx.Graph()
>>> G.add_edge(1,2,weight=7)
>>> nx.write_weighted_edgelist(G, 'test.weighted.edgelist')
See Also
--------
read_edgelist()
write_edgelist()
write_weighted_edgelist()
"""
write_edgelist(G,path, comments=comments, delimiter=delimiter,
data=('weight',), encoding = encoding)
def read_weighted_edgelist(path, comments="#", delimiter=None,
create_using=None, nodetype=None, encoding='utf-8'):
"""Read a graph as list of edges with numeric weights.
Parameters
----------
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'rb' mode.
Filenames ending in .gz or .bz2 will be uncompressed.
comments : string, optional
The character used to indicate the start of a comment.
delimiter : string, optional
The string used to separate values. The default is whitespace.
create_using : Graph container, optional,
Use specified container to build graph. The default is networkx.Graph,
an undirected graph.
nodetype : int, float, str, Python type, optional
Convert node data from strings to specified type
encoding: string, optional
Specify which encoding to use when reading file.
Returns
-------
G : graph
A networkx Graph or other type specified with create_using
Notes
-----
Since nodes must be hashable, the function nodetype must return hashable
types (e.g. int, float, str, frozenset - or tuples of those, etc.)
Example edgelist file format.
With numeric edge data::
# read with
# >>> G=nx.read_weighted_edgelist(fh)
# source target data
a b 1
a c 3.14159
d e 42
"""
return read_edgelist(path,
comments=comments,
delimiter=delimiter,
create_using=create_using,
nodetype=nodetype,
data=(('weight',float),),
encoding = encoding
)
# fixture for nose tests
def teardown_module(module):
import os
os.unlink('test.edgelist')
os.unlink('test.edgelist.gz')
os.unlink('test.weighted.edgelist')
| 30.163441 | 85 | 0.578212 |
4a1aeb89360699c1e71483b2cd1eebf531596c0f | 5,331 | py | Python | src/data_management/tests.py | tee-huynh/pipeline-profiles | 1bdbe28f3d8d2d9b0be1546253a9af5e6cf95060 | ["MIT"] | null | null | null | src/data_management/tests.py | tee-huynh/pipeline-profiles | 1bdbe28f3d8d2d9b0be1546253a9af5e6cf95060 | ["MIT"] | null | null | null | src/data_management/tests.py | tee-huynh/pipeline-profiles | 1bdbe28f3d8d2d9b0be1546253a9af5e6cf95060 | ["MIT"] | null | null | null |
import unittest
from incidents import process_incidents
from conditions import process_conditions
from util import most_common
import pandas as pd
class TestUtil(unittest.TestCase):
testData = {'row_1': [5, 3, 2, 1, 0, 0, 0, 1], 'row_2': ['e', 'a', 'b', 'c', 'd', 'e', 'e', 'c']}
df = pd.DataFrame.from_dict(testData, orient='columns')
def testMostCommonText1(self):
meta = {}
meta = most_common(self.df, meta, "row_2", "testTop1", top=1)
self.assertEqual(meta["testTop1"], "e")
def testMostCommonNumber1(self):
meta = {}
meta = most_common(self.df, meta, "row_1", "testTop1", top=1)
self.assertEqual(meta["testTop1"], "0")
def testMostCommonText2(self):
meta = {}
meta = most_common(self.df, meta, "row_2", "testTop2", top=2)
self.assertEqual(meta["testTop2"], {'e': 3, 'c': 2})
class TestNovaIncidents(unittest.TestCase):
df, volume, meta = process_incidents(remote=False, companies=['NOVA Gas Transmission Ltd.'], test=True, lang='en')
dfFR, volumeFR, metaFR = process_incidents(remote=False, companies=['NOVA Gas Transmission Ltd.'], test=True, lang='fr')
def countIncidentType(self, iType, df):
count = 0
for t in df['Incident Types']:
if iType in t:
count = count + 1
return count
def testEngEqualToFra(self):
self.assertEqual(len(self.df), len(self.dfFR))
self.assertEqual(self.countIncidentType("Adverse Environmental Effects", self.df),
self.countIncidentType("Effets environnementaux négatifs", self.dfFR))
self.assertEqual(self.meta["seriousEvents"]["Adverse Environmental Effects"],
self.metaFR["seriousEvents"]["Adverse Environmental Effects"])
self.assertEqual(self.meta["seriousEvents"]["Serious Injury (CER or TSB)"],
self.metaFR["seriousEvents"]["Serious Injury (CER or TSB)"])
self.assertEqual(self.meta["seriousEvents"]["Fatality"],
self.metaFR["seriousEvents"]["Fatality"])
def testTotal(self):
self.assertEqual(len(self.df), 330) # total incidents for NGTL
self.assertEqual(len(self.volume), 89) # total release incidents
def testIncidentTypes(self):
# test on full NGTL data
self.assertEqual(self.countIncidentType("Adverse Environmental Effects", self.df), 7)
self.assertEqual(self.countIncidentType("Serious Injury (CER or TSB)", self.df), 12)
self.assertEqual(self.countIncidentType("Fatality", self.df), 1)
# test on calculated summary metadata
self.assertEqual(self.meta["seriousEvents"]["Adverse Environmental Effects"], 7)
self.assertEqual(self.meta["seriousEvents"]["Serious Injury (CER or TSB)"], 12)
self.assertEqual(self.meta["seriousEvents"]["Fatality"], 1)
def testVariableCounts(self):
substance = self.volume[self.volume['Substance'] == "Natural Gas - Sweet"].copy()
status = self.volume[self.volume['Status'] == "Closed"].copy()
year = self.volume[self.volume['Year'] == 2013].copy()
self.assertEqual(len(substance), 83)
self.assertEqual(len(status), 82)
self.assertEqual(len(year), 2)
trueSubstanceRelease = 38370485
self.assertTrue(trueSubstanceRelease-1 <= int(substance['Approximate Volume Released'].sum()) <= trueSubstanceRelease+1)
trueStatusRelease = 26871755
self.assertTrue(trueStatusRelease-1 <= int(status['Approximate Volume Released'].sum()) <= trueStatusRelease)
trueYearRelease = 20800000
self.assertTrue(trueYearRelease-1 <= int(year['Approximate Volume Released'].sum()) <= trueYearRelease+1)
def testTrends(self):
year = self.volume[self.volume['Year'] == 2016].copy()
self.assertEqual(len(year), 8)
class NovaTotalConditions(unittest.TestCase):
company_df, regions, mapMeta, meta = process_conditions(remote=False, companies=['NOVA Gas Transmission Ltd.'], test=True, lang='en')
def testCompanyData(self):
in_Progress = self.company_df[self.company_df['Condition Status'] == "In Progress"].copy().reset_index(drop=True)
closed = self.company_df[self.company_df['Condition Status'] == "Closed"].copy().reset_index(drop=True)
self.assertEqual(len(self.company_df), 1569)
self.assertEqual(len(in_Progress), 157)
self.assertEqual(len(closed), 1412)
def testMeta(self):
self.assertEqual(self.meta["summary"]["Closed"], 1367)
self.assertEqual(self.meta["summary"]["In Progress"], 151)
self.assertEqual(self.meta["summary"]["notOnMap"]["total"], 51)
self.assertEqual(self.meta["summary"]["notOnMap"]["status"]["Closed"], 45)
self.assertEqual(self.meta["summary"]["notOnMap"]["status"]["In Progress"], 6)
total = self.meta["summary"]["Closed"] + self.meta["summary"]["In Progress"] + self.meta["summary"]["notOnMap"]["total"]
self.assertEqual(total, 1569)
def testMapMeta(self):
red_deer = self.mapMeta[self.mapMeta['id'] == "Red Deer"].copy().reset_index(drop=True)
self.assertEqual(red_deer.loc[0, "In Progress"], 9)
self.assertEqual(red_deer.loc[0, "Closed"], 35)
if __name__ == "__main__":
unittest.main()
| 48.027027 | 137 | 0.652035 |
4a1aebb48fc7d88ed33340ab791a35d1f3cf70d1 | 13,848 | py | Python | canvasapi/outcome.py | jessemcbride/canvasapi | a8f802f10ea372e8934ac0b51cef499c4fd4f172 | ["MIT"] | null | null | null | canvasapi/outcome.py | jessemcbride/canvasapi | a8f802f10ea372e8934ac0b51cef499c4fd4f172 | ["MIT"] | null | null | null | canvasapi/outcome.py | jessemcbride/canvasapi | a8f802f10ea372e8934ac0b51cef499c4fd4f172 | ["MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
from six import python_2_unicode_compatible
from canvasapi.canvas_object import CanvasObject
from canvasapi.paginated_list import PaginatedList
from canvasapi.util import combine_kwargs, obj_or_id
@python_2_unicode_compatible
class Outcome(CanvasObject):
def __str__(self):
return "{} ({})".format(self.title, self.url)
def update(self, **kwargs):
"""
Modify an existing outcome.
:calls: `PUT /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.update>`_
:returns: True if updated, False otherwise.
:rtype: bool
"""
response = self._requester.request(
'PUT',
'outcomes/{}'.format(self.id),
_kwargs=combine_kwargs(**kwargs)
)
if 'id' in response.json():
super(Outcome, self).set_attributes(response.json())
return 'id' in response.json()
@python_2_unicode_compatible
class OutcomeLink(CanvasObject):
def __str__(self):
return "Group {} with Outcome {} ({})".format(
self.outcome_group,
self.outcome,
self.url
)
def context_ref(self):
if self.context_type == 'Course':
return 'courses/{}'.format(self.context_id)
elif self.context_type == 'Account':
return 'accounts/{}'.format(self.context_id)
def get_outcome(self):
"""
Return the linked outcome
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:returns: Outcome object that was in the OutcomeLink
:rtype: :class:`canvasapi.outcome.Outcome`
"""
oid = self.outcome['id']
response = self._requester.request(
'GET',
'outcomes/{}'.format(oid)
)
return Outcome(self._requester, response.json())
def get_outcome_group(self):
"""
Return the linked outcome group
:calls: `GET /api/v1/global/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_
or `GET /api/v1/accounts/:account_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_
or `GET /api/v1/courses/:course_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_
:returns: Linked outcome group object.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
ogid = self.outcome_group['id']
response = self._requester.request(
'GET',
'{}/outcome_groups/{}'.format(self.context_ref(), ogid)
)
return OutcomeGroup(self._requester, response.json())
@python_2_unicode_compatible
class OutcomeGroup(CanvasObject):
def __str__(self):
return "{} ({})".format(self.title, self.url)
def context_ref(self):
if self.context_type == 'Course':
return 'courses/{}'.format(self.context_id)
elif self.context_type == 'Account':
return 'accounts/{}'.format(self.context_id)
elif self.context_type is None:
return 'global'
def update(self, **kwargs):
"""
Update an outcome group.
:calls: `PUT /api/v1/global/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.update>`_
or `PUT /api/v1/accounts/:account_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.update>`_
or `PUT /api/v1/courses/:course_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.update>`_
:returns: True if updated, False otherwise.
:rtype: bool
"""
response = self._requester.request(
'PUT',
'{}/outcome_groups/{}'.format(self.context_ref(), self.id),
_kwargs=combine_kwargs(**kwargs)
)
if 'id' in response.json():
super(OutcomeGroup, self).set_attributes(response.json())
return 'id' in response.json()
def delete(self):
"""
Delete an outcome group.
:calls: `DELETE /api/v1/global/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.destroy>`_
or `DELETE /api/v1/accounts/:account_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.destroy>`_
or `DELETE /api/v1/courses/:course_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.destroy>`_
:returns: True if successful, false if failed.
:rtype: bool
"""
response = self._requester.request(
'DELETE',
'{}/outcome_groups/{}'.format(self.context_ref(), self.id)
)
if 'id' in response.json():
super(OutcomeGroup, self).set_attributes(response.json())
return 'id' in response.json()
def list_linked_outcomes(self, **kwargs):
"""
List linked outcomes.
:calls: `GET /api/v1/global/outcome_groups/:id/outcomes \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.outcomes>`_
or `GET /api/v1/accounts/:account_id/outcome_groups/:id/outcomes \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.outcomes>`_
or `GET /api/v1/courses/:course_id/outcome_groups/:id/outcomes \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.outcomes>`_
:returns: Paginated List of Outcomes linked to the group.
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.outcome.OutcomeLink`
"""
return PaginatedList(
OutcomeLink,
self._requester,
'GET',
'{}/outcome_groups/{}/outcomes'.format(self.context_ref(), self.id),
_kwargs=combine_kwargs(**kwargs)
)
def link_existing(self, outcome):
"""
Link to an existing Outcome.
:calls: `PUT /api/v1/global/outcome_groups/:id/outcomes/:outcome_id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link>`_
or `PUT /api/v1/accounts/:account_id/outcome_groups/:id/outcomes/:outcome_id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link>`_
or `PUT /api/v1/courses/:course_id/outcome_groups/:id/outcomes/:outcome_id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link>`_
:param outcome: The object or ID of the outcome.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: OutcomeLink object with current OutcomeGroup and newly linked Outcome.
:rtype: :class:`canvasapi.outcome.OutcomeLink`
"""
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self._requester.request(
'PUT',
'{}/outcome_groups/{}/outcomes/{}'.format(
self.context_ref(),
self.id,
outcome_id
)
)
return OutcomeLink(self._requester, response.json())
def link_new(self, title, **kwargs):
"""
Create a new Outcome and link it to this OutcomeGroup
:calls: `POST /api/v1/global/outcome_groups/:id/outcomes \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link>`_
or `POST /api/v1/accounts/:account_id/outcome_groups/:id/outcomes \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link>`_
or `POST /api/v1/courses/:course_id/outcome_groups/:id/outcomes \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link>`_
:param title: The title of the new outcome.
:type title: str
:returns: OutcomeLink object with current OutcomeGroup and newly linked Outcome.
:rtype: :class:`canvasapi.outcome.OutcomeLink`
"""
response = self._requester.request(
'POST',
'{}/outcome_groups/{}/outcomes'.format(self.context_ref(), self.id),
title=title,
_kwargs=combine_kwargs(**kwargs)
)
return OutcomeLink(self._requester, response.json())
def unlink_outcome(self, outcome):
"""
Remove an Outcome from an OutcomeLink
:calls: `DELETE /api/v1/global/outcome_groups/:id/outcomes/:outcome_id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.unlink>`_
or `DELETE /api/v1/accounts/:account_id/outcome_groups/:id/outcomes/:outcome_id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.unlink>`_
or `DELETE /api/v1/courses/:course_id/outcome_groups/:id/outcomes/:outcome_id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.unlink>`_
:param outcome: The object or ID of the outcome.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: True if successful, false if failed.
:rtype: bool
"""
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self._requester.request(
'DELETE',
'{}/outcome_groups/{}/outcomes/{}'.format(
self.context_ref(),
self.id,
outcome_id
)
)
if 'context_id' in response.json():
super(OutcomeGroup, self).set_attributes(response.json())
return 'context_id' in response.json()
def list_subgroups(self):
"""
List subgroups.
:calls: `GET /api/v1/global/outcome_groups/:id/subgroups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.subgroups>`_
or `GET /api/v1/accounts/:account_id/outcome_groups/:id/subgroups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.subgroups>`_
or `GET /api/v1/courses/:course_id/outcome_groups/:id/subgroups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.subgroups>`_
:returns: Paginated List of OutcomeGroups linked to the current group.
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.outcome.OutcomeGroup`
"""
return PaginatedList(
OutcomeGroup,
self._requester,
'GET',
'{}/outcome_groups/{}/subgroups'.format(self.context_ref(), self.id)
)
def create_subgroup(self, title, **kwargs):
"""
Create a subgroup of the current group
:calls: `POST /api/v1/global/outcome_groups/:id/subgroups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.create>`_
or `POST /api/v1/accounts/:account_id/outcome_groups/:id/subgroups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.create>`_
or `POST /api/v1/courses/:course_id/outcome_groups/:id/subgroups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.create>`_
:param title: The title of the subgroup.
:type title: str
:returns: Itself as an OutcomeGroup object.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
response = self._requester.request(
'POST',
'{}/outcome_groups/{}/subgroups'.format(self.context_ref(), self.id),
title=title,
_kwargs=combine_kwargs(**kwargs)
)
return OutcomeGroup(self._requester, response.json())
def import_outcome_group(self, outcome_group):
"""
Import an outcome group as a subgroup into the current outcome group
:calls: `POST /api/v1/global/outcome_groups/:id/import \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.import>`_
or `POST /api/v1/accounts/:account_id/outcome_groups/:id/import \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.import>`_
or `POST /api/v1/courses/:course_id/outcome_groups/:id/import \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.import>`_
:param outcome_group: The object or ID of the outcome group to import.
:type outcome_group: :class:`canvasapi.outcome.OutcomeGroup` or int
:returns: Itself as an OutcomeGroup object.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
source_outcome_group_id = obj_or_id(outcome_group, "outcome_group", (OutcomeGroup,))
response = self._requester.request(
'POST',
'{}/outcome_groups/{}/import'.format(self.context_ref(), self.id),
source_outcome_group_id=source_outcome_group_id
)
return OutcomeGroup(self._requester, response.json())
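# Usage sketch (not part of the original module): assumes `group` is an OutcomeGroup
# obtained elsewhere through an authenticated canvasapi session; the titles are illustrative.
#
#   link = group.link_new("Critical thinking")     # create and link a new Outcome
#   for linked in group.list_linked_outcomes():    # paginated OutcomeLink objects
#       print(linked.get_outcome())
#   subgroup = group.create_subgroup("Semester 1")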
| 40.255814 | 110 | 0.635615 |
4a1aebd074cc9e878e1bd267603039b5dc94d4e2 | 2,495 | py | Python | sdk/python/pulumi_azure_native/fluidrelay/v20210312preview/get_fluid_relay_server_keys.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/fluidrelay/v20210312preview/get_fluid_relay_server_keys.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/fluidrelay/v20210312preview/get_fluid_relay_server_keys.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetFluidRelayServerKeysResult',
'AwaitableGetFluidRelayServerKeysResult',
'get_fluid_relay_server_keys',
]
@pulumi.output_type
class GetFluidRelayServerKeysResult:
"""
The set of available keys for this server.
"""
def __init__(__self__, key1=None, key2=None):
if key1 and not isinstance(key1, str):
raise TypeError("Expected argument 'key1' to be a str")
pulumi.set(__self__, "key1", key1)
if key2 and not isinstance(key2, str):
raise TypeError("Expected argument 'key2' to be a str")
pulumi.set(__self__, "key2", key2)
@property
@pulumi.getter
def key1(self) -> str:
"""
The primary key for this server
"""
return pulumi.get(self, "key1")
@property
@pulumi.getter
def key2(self) -> str:
"""
The secondary key for this server
"""
return pulumi.get(self, "key2")
class AwaitableGetFluidRelayServerKeysResult(GetFluidRelayServerKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFluidRelayServerKeysResult(
key1=self.key1,
key2=self.key2)
def get_fluid_relay_server_keys(name: Optional[str] = None,
resource_group: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFluidRelayServerKeysResult:
"""
The set of available keys for this server.
:param str name: The resource name.
:param str resource_group: The resource group containing the resource.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroup'] = resource_group
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:fluidrelay/v20210312preview:getFluidRelayServerKeys', __args__, opts=opts, typ=GetFluidRelayServerKeysResult).value
return AwaitableGetFluidRelayServerKeysResult(
key1=__ret__.key1,
key2=__ret__.key2)
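# Usage sketch (not part of the generated file): inside a Pulumi program, with an
# existing Fluid Relay server; the resource names are illustrative.
#
#   keys = get_fluid_relay_server_keys(name="myFluidRelay", resource_group="my-rg")
#   pulumi.export("primaryKey", keys.key1)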
| 31.582278 | 165 | 0.658116 |
4a1aec5d013f97ed9c394ca65e13fd219099c810 | 1,006 | py | Python | estrutura-repeticao-while/ex069.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | ["Unlicense"] | null | null | null | estrutura-repeticao-while/ex069.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | ["Unlicense"] | null | null | null | estrutura-repeticao-while/ex069.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | ["Unlicense"] | null | null | null |
"""
Crie um programa que leia a idade e o sexo de varias pessoas. A cada pessoa cadastrada,
o programa deverá perguntar se o usuario quer ou nao continuar. No final, mostre:
A) quantas pessoas tem mais de 18 anos
B) quantos homens foram cadastrados
C) quantas mulheres tem mais de 20 anos
Colocar validação para digitação errada nas opções de pergunta.
"""
M = 0
maiores18 = 0
F = 0
prosseguir = ' '
while True:
idade = int(input('Enter the age: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Enter the sex [M/F]: ')).strip().upper()[0]
if idade > 18:
maiores18 = maiores18 + 1
if sexo == 'M':
M = M + 1
if idade > 20 and sexo == 'F':
F = F + 1
continuar = ' '
while continuar not in 'YN':
continuar = str(input('Do you want to continue [Y/N]: ')).strip().upper()[0]
if continuar == 'N':
break
print(f'{maiores18} people are over 18 years old. {M} men were registered, and there are {F} women over 20.')
| 27.944444 | 116 | 0.624254 |
4a1aed34058ac112f25c30aa936aae2040388c1e | 5,146 | py | Python | glue_astronomy/translators/tests/test_ccddata.py | ibusko/glue-astronomy | 42d4188eb7b4ac29be66a218168564425decadba | ["BSD-3-Clause"] | null | null | null | glue_astronomy/translators/tests/test_ccddata.py | ibusko/glue-astronomy | 42d4188eb7b4ac29be66a218168564425decadba | ["BSD-3-Clause"] | null | null | null | glue_astronomy/translators/tests/test_ccddata.py | ibusko/glue-astronomy | 42d4188eb7b4ac29be66a218168564425decadba | ["BSD-3-Clause"] | null | null | null |
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.nddata import CCDData
from astropy.wcs import WCS
from glue.core import Data, DataCollection
from glue.core.component import Component
from glue.core.coordinates import Coordinates
WCS_CELESTIAL = WCS(naxis=2)
WCS_CELESTIAL.wcs.ctype = ['RA---TAN', 'DEC--TAN']
WCS_CELESTIAL.wcs.set()
@pytest.mark.parametrize('with_wcs', (False, True))
def test_to_ccddata(with_wcs):
if with_wcs:
coords = WCS_CELESTIAL
else:
coords = None
data = Data(label='image', coords=coords)
data.add_component(Component(np.array([[3.4, 2.3], [-1.1, 0.3]]), units='Jy'), 'x')
image = data.get_object(CCDData, attribute=data.id['x'])
assert image.wcs is (WCS_CELESTIAL if with_wcs else None)
assert_allclose(image.data, [[3.4, 2.3], [-1.1, 0.3]])
assert image.unit is u.Jy
data.add_subset(data.id['x'] > 1, label='bright')
image_subset = data.get_subset_object(cls=CCDData, subset_id=0,
attribute=data.id['x'])
assert image_subset.wcs is (WCS_CELESTIAL if with_wcs else None)
assert_allclose(image_subset.data, [[3.4, 2.3], [-1.1, 0.3]])
assert image_subset.unit is u.Jy
assert_equal(image_subset.mask, [[0, 0], [1, 1]])
def test_to_ccddata_unitless():
data = Data(label='image', coords=WCS_CELESTIAL)
data.add_component(Component(np.array([[3.4, 2.3], [-1.1, 0.3]])), 'x')
image = data.get_object(CCDData, attribute=data.id['x'])
assert_allclose(image.data, [[3.4, 2.3], [-1.1, 0.3]])
assert image.unit is u.one
def test_to_ccddata_invalid():
data = Data(label='not-an-image')
data.add_component(Component(np.array([3.4, 2.3, -1.1, 0.3]), units='Jy'), 'x')
with pytest.raises(ValueError) as exc:
data.get_object(CCDData, attribute=data.id['x'])
assert exc.value.args[0] == 'Only 2-dimensional datasets can be converted to CCDData'
class FakeCoordinates(Coordinates):
def pixel_to_world_values(self, *pixel):
raise NotImplementedError()
def world_to_pixel_values(self, *pixel):
raise NotImplementedError()
coords = FakeCoordinates(n_dim=2)
coords.low_level_wcs = coords
data = Data(label='image-with-custom-coords', coords=coords)
data.add_component(Component(np.array([[3, 4], [4, 5]]), units='Jy'), 'x')
with pytest.raises(TypeError) as exc:
data.get_object(CCDData, attribute=data.id['x'])
assert exc.value.args[0] == 'data.coords should be an instance of Coordinates or WCS'
def test_to_ccddata_default_attribute():
data = Data(label='image', coords=WCS_CELESTIAL)
with pytest.raises(ValueError) as exc:
data.get_object(CCDData)
assert exc.value.args[0] == 'Data object has no attributes.'
data.add_component(Component(np.array([[3, 4], [5, 6]]), units='Jy'), 'x')
image = data.get_object(CCDData)
assert_allclose(image.data, [[3, 4], [5, 6]])
assert image.unit is u.Jy
data.add_component(Component(np.array([[3, 4], [5, 6]]), units='Jy'), 'y')
with pytest.raises(ValueError) as exc:
data.get_object(CCDData)
assert exc.value.args[0] == ('Data object has more than one attribute, so '
'you will need to specify which one to use as '
'the flux for the spectrum using the attribute= '
'keyword argument.')
@pytest.mark.parametrize('with_wcs', (False, True))
def test_from_ccddata(with_wcs):
if with_wcs:
wcs = WCS_CELESTIAL
else:
wcs = None
spec = CCDData([[2, 3], [4, 5]] * u.Jy, wcs=wcs)
data_collection = DataCollection()
data_collection['image'] = spec
data = data_collection['image']
assert isinstance(data, Data)
assert len(data.main_components) == 1
assert data.main_components[0].label == 'data'
assert_allclose(data['data'], [[2, 3], [4, 5]])
component = data.get_component('data')
assert component.units == 'Jy'
# Check round-tripping
image_new = data.get_object(attribute='data')
assert isinstance(image_new, CCDData)
assert image_new.wcs is (WCS_CELESTIAL if with_wcs else None)
assert_allclose(image_new.data, [[2, 3], [4, 5]])
assert image_new.unit is u.Jy
def test_meta_round_trip():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
meta = {'BUNIT': 'Jy/beam',
'some_variable': 10}
spec = CCDData([[2, 3], [4, 5]] * u.Jy, wcs=wcs, meta=meta)
data_collection = DataCollection()
data_collection['image'] = spec
data = data_collection['image']
assert isinstance(data, Data)
assert len(data.meta) == 2
assert data.meta['BUNIT'] == 'Jy/beam'
assert data.meta['some_variable'] == 10
# Check round-tripping
image_new = data.get_object(attribute='data')
assert isinstance(image_new, CCDData)
assert len(image_new.meta) == 2
assert image_new.meta['BUNIT'] == 'Jy/beam'
assert image_new.meta['some_variable'] == 10
| 31.187879 | 89 | 0.64691 |
4a1aed945c56c94672fdc7dc8dc7f64e3f1f4f80 | 2,696 | py | Python | esocial/__init__.py | alexdemari/libesocial | de095354c19c34995454b6781e7dd844732c8e05 | ["Apache-2.0"] | null | null | null | esocial/__init__.py | alexdemari/libesocial | de095354c19c34995454b6781e7dd844732c8e05 | ["Apache-2.0"] | null | null | null | esocial/__init__.py | alexdemari/libesocial | de095354c19c34995454b6781e7dd844732c8e05 | ["Apache-2.0"] | null | null | null |
# Copyright 2018, Qualita Seguranca e Saude Ocupacional. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""eSocial Library
Module with functions and classes to validate and sign eSocial XML events and
access eSocial government webservices to send and retrieve events batchs.
"""
__version__ = '0.0.1.rc1'
# __esocial_version__ = '2.4.02'
__esocial_version__ = '2.5.00'
__xsd_versions__ = {
'send': {
'version': '1.1.1',
'xsd': 'EnvioLoteEventos-v{}.xsd',
},
'retrieve': {
'version': '1.0.0',
'xsd': 'ConsultaLoteEventos-v{}.xsd',
},
'send_return': {
'version': '1.1.0',
'xsd': 'RetornoEnvioLoteEventos-v{}.xsd',
},
'event_return': {
'version': '1.2.1',
'xsd': 'RetornoEvento-v{}.xsd'
},
'process_return': {
'version': '1.3.0',
'xsd': 'RetornoProcessamentoLote-v{}.xsd',
},
# new on 1.5 - Communication Package
'view_employer_event_id': {
'version': '1.0.0',
'xsd': 'ConsultaIdentificadoresEventosEmpregador-v{}.xsd'
},
'view_table_event_id': {
'version': '1.0.0',
'xsd': 'ConsultaIdentificadoresEventosTabela-v{}.xsd'
},
'view_employee_event_id': {
'version': '1.0.0',
'xsd': 'ConsultaIdentificadoresEventosTrabalhador-v{}.xsd'
},
'view_event_id_return':{
'version': '1.0.0',
'xsd': 'RetornoConsultaIdentificadoresEventos-v{}.xsd'
},
}
_TARGET = 'tests'
_WS_URL = {
'tests': {
'send': 'https://webservices.producaorestrita.esocial.gov.br/servicos/empregador/enviarloteeventos/WsEnviarLoteEventos.svc?wsdl',
'retrieve': 'https://webservices.producaorestrita.esocial.gov.br/servicos/empregador/consultarloteeventos/WsConsultarLoteEventos.svc?wsdl',
},
'production': {
'send': 'https://webservices.envio.esocial.gov.br/servicos/empregador/enviarloteeventos/WsEnviarLoteEventos.svc?wsdl',
'retrieve': 'https://webservices.consulta.esocial.gov.br/servicos/empregador/consultarloteeventos/WsConsultarLoteEventos.svc?wsdl',
}
}
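# Example (not part of the original file): resolving an XSD filename from the table
# above, assuming the version string is substituted into the template as-is.
#
#   send_cfg = __xsd_versions__['send']
#   send_cfg['xsd'].format(send_cfg['version'])   # -> 'EnvioLoteEventos-v1.1.1.xsd'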
| 34.564103 | 147 | 0.642433 |
4a1aed974801cd5b46bcd9a954da90dfa7be5645 | 4,585 | py | Python | model_1/utils.py | narcomey/mri-superresolution | 2918dcd5bae93ab988961ee24b07e0dbd735f517 | ["MIT"] | 20 | 2019-01-22T01:30:39.000Z | 2021-09-02T09:46:52.000Z | model_1/utils.py | Edenkut/mri-superresolution | 2918dcd5bae93ab988961ee24b07e0dbd735f517 | ["MIT"] | 1 | 2020-10-28T14:48:09.000Z | 2020-10-28T14:48:09.000Z | model_1/utils.py | Edenkut/mri-superresolution | 2918dcd5bae93ab988961ee24b07e0dbd735f517 | ["MIT"] | 7 | 2019-03-04T01:51:32.000Z | 2021-02-25T16:49:23.000Z |
import pydicom
import os
import math
import numpy as np
import png
from config import config
import tensorlayer as tl
import matplotlib.pyplot as plt
## Extra functions used in main.py ##
def load_info(path):
data = {}
all_patient_folders = tl.files.load_folder_list(path=path)
all_patient_folders = [folder for folder in all_patient_folders if folder[len(path)+1:-1] in str(config.patient_numbers)]
all_patient_folders.sort(key=tl.files.natural_keys)
nfolders = len(all_patient_folders)
if nfolders % 2 != 0: nfolders -= 1
print("[*] Unpackaging patient files (" + str(nfolders) + ")")
for i in range(0,nfolders,2):
patient_str = str(config.patient_numbers[int(i/2)])
print("Loading patient: " + patient_str)
hr_folder = all_patient_folders[i]
print(hr_folder)
lr_folder = all_patient_folders[i+1]
print(lr_folder)
hr_data = []
hr_files = tl.files.load_file_list(path=hr_folder, regx='.*.npy', printable=False, keep_prefix=False)
hr_files.sort(key=tl.files.natural_keys)
for file in hr_files:
hr_data.append(tl.files.load_npy_to_any(path=hr_folder + "/", name=file))
lr_data = []
lr_files = tl.files.load_file_list(path=lr_folder, regx='.*.npy', printable=False, keep_prefix=False)
lr_files.sort(key=tl.files.natural_keys)
for file in lr_files:
lr_data.append(tl.files.load_npy_to_any(path=lr_folder + "/", name=file))
data[patient_str] = [lr_data, hr_data]
return data
def convert_to_png(path):
ds = pydicom.dcmread(path)
shape = ds.pixel_array.shape
# Convert to float to avoid overflow or underflow losses.
image_2d = ds.pixel_array.astype(float)
# Rescaling grey scale between 0-255
image_2d_scaled = (np.maximum(image_2d,0) / image_2d.max()) * 255.0
# Convert to uint
image_2d_scaled = np.uint8(image_2d_scaled)
# Write the PNG file
destination = 'test.png'
with open(destination, 'wb') as png_file:
w = png.Writer(shape[1], shape[0], greyscale=True)
w.write(png_file, image_2d_scaled)
def write_pairs_to_file(path, data_pairs):
with open(path, 'w') as f:
for i, patient in enumerate(data_pairs):
f.write("{}\n".format("Patient " + str(config.patient_numbers[i])))
for dcm_index in patient:
f.write("{}\n".format(dcm_index))
def load_scan(path):
all_slices = [pydicom.read_file(path + '/' + s, force = True) for s in os.listdir(path)]
slice_list = []
incomplete_slices = []
for slice in all_slices:
if hasattr(slice, 'InstanceNumber'):
slice_list.append(slice)
else:
incomplete_slices.append(slice)
slice_list.sort(key = lambda x: int(x.InstanceNumber))
try:
slice_thickness = np.abs(slice_list[0].ImagePositionPatient[2] - slice_list[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slice_list[0].SliceLocation - slice_list[1].SliceLocation)
for s in slice_list:
s.SliceThickness = slice_thickness
return slice_list
def plot_iterative_losses(losses):
n = list(range(len(losses['mse_loss'])))
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(n, losses['d_loss'])
axarr[0].set_title('Discriminator Loss')
axarr[1].plot(n, losses['g_loss'])
axarr[1].set_title('Generator Loss')
axarr[1].set(xlabel='Iteration')
for ax in axarr.flat:
ax.set(ylabel='Loss')
plt.savefig("iter_main_losses.png")
plt.figure()
f, axarr = plt.subplots(3, sharex=True)
axarr[0].plot(n, losses['mse_loss'])
axarr[0].set_title('MSE Loss')
axarr[1].plot(n, losses['adv_loss'])
axarr[1].set_title('Adversarial Loss')
axarr[2].plot(n, losses['vgg_loss'])
axarr[2].set_title('VGG Loss')
axarr[2].set(xlabel='Iteration')
for ax in axarr.flat:
ax.set(ylabel='Loss')
plt.savefig("iter_all_losses.png")
def plot_total_losses(losses):
n = list(range(len(losses['d_loss'])))
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(n, losses['d_loss'])
axarr[0].set_title('Discriminator Loss')
axarr[1].plot(n, losses['g_loss'])
axarr[1].set_title('Generator Loss')
axarr[1].set(xlabel='Epoch')
for ax in axarr.flat:
ax.set(ylabel='Loss')
plt.savefig('epoch_total_losses')
def mse(gen, target):
err = np.sum((gen.astype("float") - target.astype("float")) ** 2)
err /= float(gen.shape[0] * gen.shape[1])
return err
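# Usage sketch (not part of the original file): the paths are illustrative and assume
# a directory of DICOM slices for a single series.
#
#   slices = load_scan('/data/patient_001/T1')           # sorted slices, SliceThickness set
#   convert_to_png('/data/patient_001/T1/IM-0001.dcm')   # writes 'test.png' in the cwd
#   print(mse(np.ones((4, 4)), np.zeros((4, 4))))        # 1.0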
| 35.269231 | 125 | 0.652563 |
4a1aede361c118b119eeeebe7f7feea4c7a2d13f | 124 | py | Python | posthog/models/filters/__init__.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | ["MIT"] | null | null | null | posthog/models/filters/__init__.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | ["MIT"] | null | null | null | posthog/models/filters/__init__.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | ["MIT"] | null | null | null |
# flake8: noqa
from .filter import Filter
from .path_filter import PathFilter
from .retention_filter import RetentionFilter
| 24.8 | 45 | 0.83871 |
4a1aeed41fcc319890a10778173fd9465196f61a | 8,214 | py | Python | backend/balance/utils.py | Kodeworks/budsjetteringssystem | 30b8c2ef0522847c316e466d299a59470e73f712 | ["MIT"] | 6 | 2019-06-14T08:25:49.000Z | 2019-09-18T18:38:04.000Z | backend/balance/utils.py | Kodeworks/liquidator-frontend | 30b8c2ef0522847c316e466d299a59470e73f712 | ["MIT"] | 113 | 2019-06-13T09:09:42.000Z | 2019-09-21T16:43:49.000Z | backend/balance/utils.py | Kodeworks/budsjetteringssystem | 30b8c2ef0522847c316e466d299a59470e73f712 | ["MIT"] | 2 | 2019-10-21T18:07:01.000Z | 2019-12-08T07:11:40.000Z |
import datetime
from dateutil.relativedelta import relativedelta
from typing import Any
from dataclasses import dataclass
from django.db.models import Q
from transaction.models import Transaction, RecurringTransaction
from transaction.utils import RecurringTransactionOccurence
from .models import BankBalance
day = datetime.timedelta(days=1)
@dataclass
class Balance:
"""Estimates the balance of an account from the transactions in the account."""
company_id: int
date: datetime.date
money: int
@classmethod
def from_bank_balance(cls, bank_balance):
return cls(company_id=bank_balance.company.pk, date=bank_balance.date, money=bank_balance.money)
@classmethod
def get_transaction_balances(cls, company_id, start_date, end_date, start_balance=0):
"""Helper method that accumulates balances from transactions."""
balances = {}
balance = start_balance
transaction_queryset = Transaction.objects.filter(company=company_id, date__gte=start_date, date__lte=end_date)
recurring_transactions = RecurringTransaction.get_all_occurences(company_id, start_date, end_date, False)
transactions = {}
for transaction in transaction_queryset:
money = transaction.money
if transaction.type != Transaction.INCOME:
money = -money
if transaction.date in transactions:
transactions[transaction.date] += money
else:
transactions[transaction.date] = money
for recurring, dates in recurring_transactions:
money = recurring.template.money
if recurring.template.type != Transaction.INCOME:
money = -money
for date in dates:
if date in transactions:
transactions[date] += money
else:
transactions[date] = money
# We loop through the dictionary, sorted by date
for date, money in sorted(transactions.items(), key=lambda x: x[0]):
balance += money
balances[date] = cls(company_id=company_id, date=date, money=balance)
return balances
@classmethod
def for_date_range(cls, company_id, start_date, end_date, include_bank_balances=False):
"""Get all balances in a date range."""
balances = {}
balance = cls.for_date(company_id, start_date - day, True).money
bank_balances = BankBalance.objects.filter(company=company_id, date__gte=start_date, date__lte=end_date)
if not bank_balances:
balances = cls.get_transaction_balances(company_id, start_date, end_date, balance)
else:
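            # Compute balances piecewise between consecutive bank balances, resetting to each recorded balance.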
if bank_balances[0].date != start_date:
period_end = bank_balances[0].date
if include_bank_balances:
period_end -= day
balances.update(cls.get_transaction_balances(company_id, start_date, period_end, balance))
elif not include_bank_balances:
balances.update(cls.get_transaction_balances(company_id, start_date, start_date, balance))
for i, bank_balance in enumerate(bank_balances):
if include_bank_balances:
balances[bank_balance.date] = cls.from_bank_balance(bank_balance)
balance = bank_balance.money
period_start = bank_balance.date + day
if len(bank_balances) > i + 1:
period_end = bank_balances[i + 1].date
if not include_bank_balances:
period_end -= day
else:
period_end = end_date
balances.update(cls.get_transaction_balances(company_id, period_start, period_end, balance))
return list(balances.values())
@classmethod
def for_date(cls, company_id, date, include_bank_balances=False):
"""Get the balance for a date."""
transactions = Transaction.objects.filter(company=company_id, date__lte=date)
try:
date_filter = Q(date__lte=date) if include_bank_balances else Q(date__lt=date)
last_bank_balance = BankBalance.objects.filter(date_filter, company=company_id).latest('date')
balance = last_bank_balance.money
if last_bank_balance.date == date:
return cls.from_bank_balance(last_bank_balance)
else:
transactions = transactions.filter(date__gt=last_bank_balance.date)
recurring_transactions = RecurringTransaction.get_all_occurences(company_id,
last_bank_balance.date + day,
date, False)
except BankBalance.DoesNotExist:
balance = 0
recurring_transactions = RecurringTransaction.get_all_occurences(company_id,
datetime.date.min,
date,
False)
for transaction in transactions:
if transaction.type == Transaction.INCOME:
balance += transaction.money
else:
balance -= transaction.money
for recurring, dates in recurring_transactions:
for _ in dates:
if recurring.template.type == Transaction.INCOME:
balance += recurring.template.money
else:
balance -= recurring.template.money
return cls(company_id=company_id, date=date, money=balance)
@dataclass
class Month:
"""A collection of useful financial information for a month."""
year: int
month: int
start_balance: int
lowest_balance: Balance
transactions: Any
recurring: Any
balances: Any
bank_balances: Any
@classmethod
def get(cls, company_id, year, month):
"""Get a Month given a year and month."""
month_start = datetime.date(year, month, 1)
month_end = month_start + relativedelta(day=31)
filter = Q(company=company_id, date__gte=month_start, date__lte=month_end)
start_balance = Balance.for_date(company_id, month_start - relativedelta(days=1))
balances = Balance.for_date_range(company_id, month_start, month_end)
transactions = Transaction.objects.filter(filter)
bank_balances = BankBalance.objects.filter(filter)
recurring = [RecurringTransactionOccurence(recurring, dates)
for recurring, dates in RecurringTransaction.get_all_occurences(company_id,
month_start,
month_end)]
# If there isn't a balance for the first day of the month we include the incoming
# balance in the calculation of the lowest balance
if balances and balances[0].date == month_start:
lowest_balance = balances[0]
elif bank_balances and bank_balances[0].date == month_start:
lowest_balance = bank_balances[0]
else:
lowest_balance = start_balance
lowest_balance = min(list(balances) + list(bank_balances) + [lowest_balance], key=lambda x: x.money)
return cls(year, month, start_balance.money, lowest_balance, transactions, recurring, balances, bank_balances)
@classmethod
def for_date_range(cls, company_id, start_date, end_date):
if end_date < start_date:
return []
month = start_date.month
year = start_date.year
end_month = end_date.month
end_year = end_date.year
result = []
while (month <= end_month and year <= end_year) or year < end_year:
result.append(cls.get(company_id, year, month))
month += 1
if month == 13:
month = 1
year += 1
return result
| 40.663366 | 119 | 0.603482 |
4a1aef934526bfa4bd4d257db171a95ca6fc835c
| 807 |
py
|
Python
|
ejabberd_python3d/defaults/arguments.py
|
Dedaldino3D/ejabberd-python3d
|
4ff4474347b89ecfda48ff05b76d3a8b3c983046
|
[
"MIT"
] | 4 |
2020-11-04T02:33:06.000Z
|
2021-08-29T23:55:47.000Z
|
ejabberd_python3d/defaults/arguments.py
|
Dedaldino3D/ejabberd-python3d
|
4ff4474347b89ecfda48ff05b76d3a8b3c983046
|
[
"MIT"
] | null | null | null |
ejabberd_python3d/defaults/arguments.py
|
Dedaldino3D/ejabberd-python3d
|
4ff4474347b89ecfda48ff05b76d3a8b3c983046
|
[
"MIT"
] | 1 |
2021-09-26T01:49:23.000Z
|
2021-09-26T01:49:23.000Z
|
from __future__ import unicode_literals
from ..abc.api import APIArgument
from ..serializers import StringSerializer, IntegerSerializer, PositiveIntegerSerializer, BooleanSerializer, \
LogLevelSerializer, ListSerializer, GenericSerializer
class GenericArgument(APIArgument):
serializer_class = GenericSerializer
class StringArgument(APIArgument):
serializer_class = StringSerializer
class IntegerArgument(APIArgument):
serializer_class = IntegerSerializer
class PositiveIntegerArgument(APIArgument):
serializer_class = PositiveIntegerSerializer
class BooleanArgument(APIArgument):
serializer_class = BooleanSerializer
class LogLevelArgument(APIArgument):
serializer_class = LogLevelSerializer
class ListArgument(APIArgument):
serializer_class = ListSerializer
| 23.735294 | 110 | 0.825279 |
4a1af0b97095df4a8b9e51d9985d490e32a23d55
| 78 |
py
|
Python
|
shellprofile.py
|
lambdamusic/scigraph-cli
|
ef5060e849ee9541f047f44c315833c9a2db2f9f
|
[
"Apache-2.0"
] | 2 |
2021-04-18T10:24:01.000Z
|
2021-04-28T12:09:17.000Z
|
shellprofile.py
|
lambdamusic/PyScigraph
|
ef5060e849ee9541f047f44c315833c9a2db2f9f
|
[
"Apache-2.0"
] | 2 |
2019-07-04T09:21:59.000Z
|
2019-07-04T17:03:07.000Z
|
shellprofile.py
|
lambdamusic/PyScigraph
|
ef5060e849ee9541f047f44c315833c9a2db2f9f
|
[
"Apache-2.0"
] | null | null | null |
# startup file for ipython
# $ ipython startup.py -i
from pyscigraph import *
| 19.5 | 26 | 0.74359 |
4a1af28d0a620d86e10b1d428db9eb9c79838f57
| 16,768 |
py
|
Python
|
slowfast/utils/checkpoint.py
|
coldmanck/VidHOI
|
523e9297202e869cfff70736336dabb35faf8d00
|
[
"Apache-2.0"
] | 26 |
2021-05-26T12:17:46.000Z
|
2022-03-24T10:30:22.000Z
|
slowfast/utils/checkpoint.py
|
coldmanck/VidHOI
|
523e9297202e869cfff70736336dabb35faf8d00
|
[
"Apache-2.0"
] | 4 |
2021-06-24T07:14:37.000Z
|
2022-03-31T07:51:20.000Z
|
slowfast/utils/checkpoint.py
|
coldmanck/VidHOI
|
523e9297202e869cfff70736336dabb35faf8d00
|
[
"Apache-2.0"
] | 7 |
2021-05-31T14:43:13.000Z
|
2022-03-03T13:32:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Functions that handle saving and loading of checkpoints."""
import copy
import numpy as np
import os
import pickle
from collections import OrderedDict
import torch
from fvcore.common.file_io import PathManager
import slowfast.utils.distributed as du
import slowfast.utils.logging as logging
from slowfast.utils.c2_model_loading import get_name_convert_func
logger = logging.get_logger(__name__)
def make_checkpoint_dir(path_to_job):
"""
Creates the checkpoint directory (if not present already).
Args:
path_to_job (string): the path to the folder of the current job.
"""
checkpoint_dir = os.path.join(path_to_job, "checkpoints")
# Create the checkpoint dir from the master process
if du.is_master_proc() and not PathManager.exists(checkpoint_dir):
try:
PathManager.mkdirs(checkpoint_dir)
except Exception:
pass
return checkpoint_dir
def get_checkpoint_dir(path_to_job):
"""
Get path for storing checkpoints.
Args:
path_to_job (string): the path to the folder of the current job.
"""
return os.path.join(path_to_job, "checkpoints")
def get_path_to_checkpoint(path_to_job, epoch):
"""
Get the full path to a checkpoint file.
Args:
path_to_job (string): the path to the folder of the current job.
epoch (int): the number of epoch for the checkpoint.
"""
name = "checkpoint_epoch_{:05d}.pyth".format(epoch)
return os.path.join(get_checkpoint_dir(path_to_job), name)
def get_last_checkpoint(path_to_job):
"""
Get the last checkpoint from the checkpointing folder.
Args:
path_to_job (string): the path to the folder of the current job.
"""
d = get_checkpoint_dir(path_to_job)
names = PathManager.ls(d) if PathManager.exists(d) else []
names = [f for f in names if "checkpoint" in f]
assert len(names), "No checkpoints found in '{}'.".format(d)
# Sort the checkpoints by epoch.
name = sorted(names)[-1]
return os.path.join(d, name)
def has_checkpoint(path_to_job):
"""
Determines if the given directory contains a checkpoint.
Args:
path_to_job (string): the path to the folder of the current job.
"""
d = get_checkpoint_dir(path_to_job)
files = PathManager.ls(d) if PathManager.exists(d) else []
return any("checkpoint" in f for f in files)
def is_checkpoint_epoch(cfg, cur_epoch, multigrid_schedule=None):
"""
Determine if a checkpoint should be saved on current epoch.
Args:
cfg (CfgNode): configs to save.
cur_epoch (int): current number of epoch of the model.
multigrid_schedule (List): schedule for multigrid training.
"""
if cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH:
return True
if multigrid_schedule is not None:
prev_epoch = 0
for s in multigrid_schedule:
if cur_epoch < s[-1]:
period = max(
(s[-1] - prev_epoch) // cfg.MULTIGRID.EVAL_FREQ + 1, 1
)
return (s[-1] - 1 - cur_epoch) % period == 0
prev_epoch = s[-1]
return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0
def save_checkpoint(path_to_job, model, optimizer, epoch, cfg):
"""
Save a checkpoint.
Args:
model (model): model to save the weight to the checkpoint.
optimizer (optim): optimizer to save the historical state.
epoch (int): current number of epoch of the model.
cfg (CfgNode): configs to save.
"""
# Save checkpoints only from the master process.
if not du.is_master_proc(cfg.NUM_GPUS * cfg.NUM_SHARDS):
return
# Ensure that the checkpoint dir exists.
PathManager.mkdirs(get_checkpoint_dir(path_to_job))
# Omit the DDP wrapper in the multi-gpu setting.
sd = model.module.state_dict() if cfg.NUM_GPUS > 1 else model.state_dict()
normalized_sd = sub_to_normal_bn(sd)
# Record the state.
checkpoint = {
"epoch": epoch,
"model_state": normalized_sd,
"optimizer_state": optimizer.state_dict(),
"cfg": cfg.dump(),
}
# Write the checkpoint.
path_to_checkpoint = get_path_to_checkpoint(path_to_job, epoch + 1)
with PathManager.open(path_to_checkpoint, "wb") as f:
torch.save(checkpoint, f)
return path_to_checkpoint
def inflate_weight(state_dict_2d, state_dict_3d):
"""
Inflate 2D model weights in state_dict_2d to the 3D model weights in
state_dict_3d. The details can be found in:
Joao Carreira, and Andrew Zisserman.
"Quo vadis, action recognition? a new model and the kinetics dataset."
Args:
state_dict_2d (OrderedDict): a dict of parameters from a 2D model.
state_dict_3d (OrderedDict): a dict of parameters from a 3D model.
Returns:
state_dict_inflated (OrderedDict): a dict of inflated parameters.
"""
state_dict_inflated = OrderedDict()
for k, v2d in state_dict_2d.items():
assert k in state_dict_3d.keys()
v3d = state_dict_3d[k]
# Inflate the weight of 2D conv to 3D conv.
if len(v2d.shape) == 4 and len(v3d.shape) == 5:
logger.info(
"Inflate {}: {} -> {}: {}".format(k, v2d.shape, k, v3d.shape)
)
# Dimension need to be match.
assert v2d.shape[-2:] == v3d.shape[-2:]
assert v2d.shape[:2] == v3d.shape[:2]
v3d = (
v2d.unsqueeze(2).repeat(1, 1, v3d.shape[2], 1, 1) / v3d.shape[2]
)
elif v2d.shape == v3d.shape:
v3d = v2d
else:
logger.info(
"Unexpected {}: {} -|> {}: {}".format(
k, v2d.shape, k, v3d.shape
)
)
state_dict_inflated[k] = v3d.clone()
return state_dict_inflated
def load_checkpoint(
path_to_checkpoint,
model,
data_parallel=True,
optimizer=None,
inflation=False,
convert_from_caffe2=False,
):
"""
Load the checkpoint from the given file. If inflation is True, inflate the
2D Conv weights from the checkpoint to 3D Conv.
Args:
path_to_checkpoint (string): path to the checkpoint to load.
model (model): model to load the weights from the checkpoint.
data_parallel (bool): if true, model is wrapped by
torch.nn.parallel.DistributedDataParallel.
optimizer (optim): optimizer to load the historical state.
inflation (bool): if True, inflate the weights from the checkpoint.
convert_from_caffe2 (bool): if True, load the model from caffe2 and
convert it to pytorch.
Returns:
(int): the number of training epoch of the checkpoint.
"""
assert PathManager.exists(
path_to_checkpoint
), "Checkpoint '{}' not found".format(path_to_checkpoint)
# Account for the DDP wrapper in the multi-gpu setting.
ms = model.module if data_parallel else model
if convert_from_caffe2:
with PathManager.open(path_to_checkpoint, "rb") as f:
caffe2_checkpoint = pickle.load(f, encoding="latin1")
state_dict = OrderedDict()
name_convert_func = get_name_convert_func()
for key in caffe2_checkpoint["blobs"].keys():
converted_key = name_convert_func(key)
converted_key = c2_normal_to_sub_bn(converted_key, ms.state_dict())
if converted_key in ms.state_dict():
c2_blob_shape = caffe2_checkpoint["blobs"][key].shape
model_blob_shape = ms.state_dict()[converted_key].shape
# Load BN stats to Sub-BN.
if (
len(model_blob_shape) == 1
and len(c2_blob_shape) == 1
and model_blob_shape[0] > c2_blob_shape[0]
and model_blob_shape[0] % c2_blob_shape[0] == 0
):
caffe2_checkpoint["blobs"][key] = np.concatenate(
[caffe2_checkpoint["blobs"][key]]
* (model_blob_shape[0] // c2_blob_shape[0])
)
c2_blob_shape = caffe2_checkpoint["blobs"][key].shape
if c2_blob_shape == tuple(model_blob_shape):
state_dict[converted_key] = torch.tensor(
caffe2_checkpoint["blobs"][key]
).clone()
logger.info(
"{}: {} => {}: {}".format(
key,
c2_blob_shape,
converted_key,
tuple(model_blob_shape),
)
)
else:
logger.warn(
"!! {}: {} does not match {}: {}".format(
key,
c2_blob_shape,
converted_key,
tuple(model_blob_shape),
)
)
else:
if not any(
prefix in key for prefix in ["momentum", "lr", "model_iter"]
):
logger.warn(
"!! {}: can not be converted, got {}".format(
key, converted_key
)
)
ms.load_state_dict(state_dict, strict=False)
epoch = -1
else:
# Load the checkpoint on CPU to avoid GPU mem spike.
with PathManager.open(path_to_checkpoint, "rb") as f:
checkpoint = torch.load(f, map_location="cpu")
model_state_dict_3d = (
model.module.state_dict() if data_parallel else model.state_dict()
)
checkpoint["model_state"] = normal_to_sub_bn(
checkpoint["model_state"], model_state_dict_3d
)
if inflation:
# Try to inflate the model.
inflated_model_dict = inflate_weight(
checkpoint["model_state"], model_state_dict_3d
)
ms.load_state_dict(inflated_model_dict, strict=False)
else:
ms.load_state_dict(checkpoint["model_state"])
# Load the optimizer state (commonly not done when fine-tuning)
if optimizer:
optimizer.load_state_dict(checkpoint["optimizer_state"])
if "epoch" in checkpoint.keys():
epoch = checkpoint["epoch"]
else:
epoch = -1
return epoch
def sub_to_normal_bn(sd):
"""
    Convert the Sub-BN parameters to normal BN parameters in a state dict.
There are two copies of BN layers in a Sub-BN implementation: `bn.bn` and
`bn.split_bn`. `bn.split_bn` is used during training and
"compute_precise_bn". Before saving or evaluation, its stats are copied to
`bn.bn`. We rename `bn.bn` to `bn` and store it to be consistent with normal
BN layers.
Args:
        sd (OrderedDict): a dict of parameters which might contain Sub-BN
parameters.
Returns:
new_sd (OrderedDict): a dict with Sub-BN parameters reshaped to
normal parameters.
"""
new_sd = copy.deepcopy(sd)
modifications = [
("bn.bn.running_mean", "bn.running_mean"),
("bn.bn.running_var", "bn.running_var"),
("bn.split_bn.num_batches_tracked", "bn.num_batches_tracked"),
]
to_remove = ["bn.bn.", ".split_bn."]
for key in sd:
for before, after in modifications:
if key.endswith(before):
new_key = key.split(before)[0] + after
new_sd[new_key] = new_sd.pop(key)
for rm in to_remove:
if rm in key and key in new_sd:
del new_sd[key]
for key in new_sd:
if key.endswith("bn.weight") or key.endswith("bn.bias"):
if len(new_sd[key].size()) == 4:
assert all(d == 1 for d in new_sd[key].size()[1:])
new_sd[key] = new_sd[key][:, 0, 0, 0]
return new_sd
def c2_normal_to_sub_bn(key, model_keys):
"""
Convert BN parameters to Sub-BN parameters if model contains Sub-BNs.
Args:
        key (str): source parameter key.
        model_keys (OrderedDict): target model state dict.
    Returns:
        (str): the converted parameter key.
"""
if "bn.running_" in key:
if key in model_keys:
return key
new_key = key.replace("bn.running_", "bn.split_bn.running_")
if new_key in model_keys:
return new_key
else:
return key
def normal_to_sub_bn(checkpoint_sd, model_sd):
"""
Convert BN parameters to Sub-BN parameters if model contains Sub-BNs.
Args:
checkpoint_sd (OrderedDict): source dict of parameters.
model_sd (OrderedDict): target dict of parameters.
Returns:
new_sd (OrderedDict): converted dict of parameters.
"""
for key in model_sd:
if key not in checkpoint_sd:
if "bn.split_bn." in key:
load_key = key.replace("bn.split_bn.", "bn.")
bn_key = key.replace("bn.split_bn.", "bn.bn.")
checkpoint_sd[key] = checkpoint_sd.pop(load_key)
checkpoint_sd[bn_key] = checkpoint_sd[key]
for key in model_sd:
if key in checkpoint_sd:
model_blob_shape = model_sd[key].shape
c2_blob_shape = checkpoint_sd[key].shape
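            # Replicate BN stats when the model's Sub-BN expects more entries than the checkpoint provides.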
if (
len(model_blob_shape) == 1
and len(c2_blob_shape) == 1
and model_blob_shape[0] > c2_blob_shape[0]
and model_blob_shape[0] % c2_blob_shape[0] == 0
):
before_shape = checkpoint_sd[key].shape
checkpoint_sd[key] = torch.cat(
[checkpoint_sd[key]]
* (model_blob_shape[0] // c2_blob_shape[0])
)
logger.info(
"{} {} -> {}".format(
key, before_shape, checkpoint_sd[key].shape
)
)
return checkpoint_sd
def load_test_checkpoint(cfg, model):
"""
Loading checkpoint logic for testing.
"""
# Load a checkpoint to test if applicable.
if cfg.TEST.CHECKPOINT_FILE_PATH != "":
# If no checkpoint found in MODEL_VIS.CHECKPOINT_FILE_PATH or in the current
# checkpoint folder, try to load checkpoint from
# TEST.CHECKPOINT_FILE_PATH and test it.
        logger.info(f'Loading checkpoint from {cfg.TEST.CHECKPOINT_FILE_PATH}')
load_checkpoint(
cfg.TEST.CHECKPOINT_FILE_PATH,
model,
cfg.NUM_GPUS > 1,
None,
inflation=False,
convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
)
elif has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR)
load_checkpoint(last_checkpoint, model, cfg.NUM_GPUS > 1)
elif cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
# If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current
# checkpoint folder, try to load checkpoint from
# TRAIN.CHECKPOINT_FILE_PATH and test it.
load_checkpoint(
cfg.TRAIN.CHECKPOINT_FILE_PATH,
model,
cfg.NUM_GPUS > 1,
None,
inflation=False,
convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
)
else:
logger.info(
"Unknown way of loading checkpoint. Using with random initialization, only for debugging."
)
def load_train_checkpoint(cfg, model, optimizer):
"""
Loading checkpoint logic for training.
"""
if cfg.TRAIN.AUTO_RESUME and has_checkpoint(cfg.OUTPUT_DIR):
last_checkpoint = get_last_checkpoint(cfg.OUTPUT_DIR)
logger.info("Load from last checkpoint, {}.".format(last_checkpoint))
checkpoint_epoch = load_checkpoint(
last_checkpoint, model, cfg.NUM_GPUS > 1, optimizer
)
start_epoch = checkpoint_epoch + 1
elif cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
logger.info("Load from given checkpoint file.")
checkpoint_epoch = load_checkpoint(
cfg.TRAIN.CHECKPOINT_FILE_PATH,
model,
cfg.NUM_GPUS > 1,
optimizer,
inflation=cfg.TRAIN.CHECKPOINT_INFLATE,
convert_from_caffe2=cfg.TRAIN.CHECKPOINT_TYPE == "caffe2",
)
start_epoch = checkpoint_epoch + 1
else:
logger.info("Training from scratch.")
start_epoch = 0
return start_epoch
| 36.611354 | 102 | 0.594823 |
4a1af30560eb80ea58379dc76f0da7237be65b96
| 2,886 |
py
|
Python
|
cymunk/examples/point_query.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | 18 |
2015-03-17T07:04:58.000Z
|
2017-04-24T13:05:00.000Z
|
cymunk/examples/point_query.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | 10 |
2017-08-04T18:33:27.000Z
|
2021-05-10T03:35:56.000Z
|
cymunk/examples/point_query.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | 13 |
2015-03-12T02:38:37.000Z
|
2017-02-21T21:25:06.000Z
|
"""This example showcase point queries by highlighting the shape under the
mouse pointer.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import random
import sys
import pygame
from pygame.locals import *
from pygame.color import *
import cymunk as pm
from cymunk import Vec2d
import pygame_util
def main():
pygame.init()
screen = pygame.display.set_mode((600, 600))
clock = pygame.time.Clock()
running = True
### Physics stuff
space = pm.Space()
space.gravity = Vec2d(0.0, -900.0)
## Balls
balls = []
### walls
static_lines = [pm.Segment(space.static_body, Vec2d(111.0, 280.0), Vec2d(407.0, 246.0), 1.0)
,pm.Segment(space.static_body, Vec2d(407.0, 246.0), Vec2d(407.0, 343.0), 1.0)
]
space.add(static_lines)
ticks_to_next_ball = 10
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
elif event.type == KEYDOWN and event.key == K_ESCAPE:
running = False
elif event.type == KEYDOWN and event.key == K_p:
pygame.image.save(screen, "point_query.png")
ticks_to_next_ball -= 1
if ticks_to_next_ball <= 0:
ticks_to_next_ball = 100
mass = 10
radius = 25
inertia = pm.moment_for_circle(mass, 0, radius, Vec2d(0,0))
body = pm.Body(mass, inertia)
x = random.randint(115,350)
body.position = x, 400
shape = pm.Circle(body, radius, Vec2d(0,0))
#shape.color = THECOLORS["lightgrey"]
space.add(body, shape)
balls.append(shape)
### Clear screen
screen.fill(THECOLORS["white"])
### Draw stuff
pygame_util.draw(screen, space)
balls_to_remove = []
for ball in balls:
if ball.body.position.y < 200: balls_to_remove.append(ball)
for ball in balls_to_remove:
space.remove(ball, ball.body)
balls.remove(ball)
mouse_pos = pygame_util.get_mouse_pos(screen)
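        # Query the shape (if any) directly under the mouse pointer so it can be highlighted.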
shape = space.point_query_first(Vec2d(mouse_pos))
if shape is not None:
if hasattr(shape, "radius"):
r = shape.radius + 4
else:
r = 10
p = pygame_util.to_pygame(shape.body.position, screen)
pygame.draw.circle(screen, THECOLORS["red"], p, int(r), 2)
### Update physics
dt = 1.0/60.0
for x in range(1):
space.step(dt)
### Flip screen
pygame.display.flip()
clock.tick(50)
pygame.display.set_caption("fps: " + str(clock.get_fps()))
if __name__ == '__main__':
sys.exit(main())
| 27.75 | 97 | 0.546085 |
4a1af4503d66c861611869f7c6048c9b9514c0e4
| 16,301 |
py
|
Python
|
CAP-newCluster/train_cap.py
|
ZoRoronoa/Camera-Aware-Proxy
|
352f900bbae330f18c2bfe2b3f2516fb4e31adea
|
[
"Apache-2.0"
] | null | null | null |
CAP-newCluster/train_cap.py
|
ZoRoronoa/Camera-Aware-Proxy
|
352f900bbae330f18c2bfe2b3f2516fb4e31adea
|
[
"Apache-2.0"
] | null | null | null |
CAP-newCluster/train_cap.py
|
ZoRoronoa/Camera-Aware-Proxy
|
352f900bbae330f18c2bfe2b3f2516fb4e31adea
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import numpy as np
import sys
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from reid.datasets.target_dataset import DA
from reid import models
from reid.models import stb_net
from reid.trainers import Trainer
from reid.evaluators import Evaluator
from reid.utils.data import transforms as T
from reid.utils.data.preprocessor import Preprocessor, UnsupervisedTargetPreprocessor, ClassUniformlySampler
from reid.utils.logging import Logger
from reid.loss import CAPMemory
from bisect import bisect_right
from reid.utils.evaluation_metrics.retrieval import PersonReIDMAP
from reid.utils.meters import CatMeter
from reid.img_grouping import img_association
from ChannelAug import ChannelAdap, ChannelAdapGray, ChannelExchange, ChannelRandomErasing
def get_data(data_dir, target, height, width, batch_size, re=0, workers=8):
dataset = DA(data_dir, target, generate_propagate_data=True)
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
num_classes = dataset.num_train_ids
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer,
])
ca_transformer = T.Compose([
T.Pad(10),
T.RandomCrop((288, 144)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalizer,
ChannelRandomErasing(probability = 0.5),
ChannelExchange(gray = 2)
])
propagate_loader = DataLoader(
UnsupervisedTargetPreprocessor(dataset.target_train_original,
root=osp.join(dataset.target_images_dir, dataset.target_train_path),
num_cam=dataset.target_num_cam, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
ca_propagate_loader = DataLoader(
UnsupervisedTargetPreprocessor(dataset.target_train_original,
root=osp.join(dataset.target_images_dir, dataset.target_train_path),
num_cam=dataset.target_num_cam, transform=ca_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
query_loader = DataLoader(
Preprocessor(dataset.query,
root=osp.join(dataset.target_images_dir, dataset.query_path), transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
gallery_loader = DataLoader(
Preprocessor(dataset.gallery,
root=osp.join(dataset.target_images_dir, dataset.gallery_path), transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return dataset, num_classes, query_loader, gallery_loader, propagate_loader, ca_propagate_loader
def update_train_loader(dataset, train_samples, updated_label, height, width, batch_size, re, workers,
all_img_cams, sample_position=7):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
T.RandomErasing(EPSILON=re)
])
ca_transformer = T.Compose([
T.Pad(10),
T.RandomCrop((288, 144)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalizer,
ChannelRandomErasing(probability = 0.5),
ChannelExchange(gray = 2)
])
# obtain global accumulated label from pseudo label and cameras
pure_label = updated_label[updated_label>=0]
pure_cams = all_img_cams[updated_label>=0]
accumulate_labels = np.zeros(pure_label.shape, pure_label.dtype)
prev_id_count = 0
id_count_each_cam = []
for this_cam in np.unique(pure_cams):
percam_labels = pure_label[pure_cams == this_cam]
unique_id = np.unique(percam_labels)
id_count_each_cam.append(len(unique_id))
id_dict = {ID: i for i, ID in enumerate(unique_id.tolist())}
for i in range(len(percam_labels)):
percam_labels[i] = id_dict[percam_labels[i]]
accumulate_labels[pure_cams == this_cam] = percam_labels + prev_id_count
prev_id_count += len(unique_id)
print(' sum(id_count_each_cam)= {}'.format(sum(id_count_each_cam)))
new_accum_labels = -1*np.ones(updated_label.shape, updated_label.dtype)
new_accum_labels[updated_label>=0] = accumulate_labels
# update sample list
new_train_samples = []
for sample in train_samples:
lbl = updated_label[sample[3]]
if lbl != -1:
assert(new_accum_labels[sample[3]]>=0)
new_sample = sample + (lbl, new_accum_labels[sample[3]])
new_train_samples.append(new_sample)
target_train_loader = DataLoader(
UnsupervisedTargetPreprocessor(new_train_samples, root=osp.join(dataset.target_images_dir, dataset.target_train_path),
num_cam=dataset.target_num_cam, transform=ca_transformer, has_pseudo_label=True),
batch_size=batch_size, num_workers=workers, pin_memory=True, drop_last=True,
sampler=ClassUniformlySampler(new_train_samples, class_position=sample_position, k=4))
return target_train_loader, len(new_train_samples)
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
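    # Multi-step LR decay with an initial constant or linear warmup phase.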
def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3, warmup_iters=500,
warmup_method="linear", last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / float(self.warmup_iters)
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
def test_model(model, query_loader, gallery_loader):
model.eval()
# meters
query_features_meter, query_pids_meter, query_cids_meter = CatMeter(), CatMeter(), CatMeter()
gallery_features_meter, gallery_pids_meter, gallery_cids_meter = CatMeter(), CatMeter(), CatMeter()
# init dataset
loaders = [query_loader, gallery_loader]
# compute query and gallery features
with torch.no_grad():
for loader_id, loader in enumerate(loaders):
for data in loader:
images = data[0]
pids = data[2]
cids = data[3]
images = images.to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
features = model(images)
# save as query features
if loader_id == 0:
query_features_meter.update(features.data)
query_pids_meter.update(pids)
query_cids_meter.update(cids)
# save as gallery features
elif loader_id == 1:
gallery_features_meter.update(features.data)
gallery_pids_meter.update(pids)
gallery_cids_meter.update(cids)
query_features = query_features_meter.get_val_numpy()
gallery_features = gallery_features_meter.get_val_numpy()
# compute mAP and rank@k
result = PersonReIDMAP(
query_features, query_cids_meter.get_val_numpy(), query_pids_meter.get_val_numpy(),
gallery_features, gallery_cids_meter.get_val_numpy(), gallery_pids_meter.get_val_numpy(), dist='cosine')
return result.mAP, result.CMC[0], result.CMC[4], result.CMC[9], result.CMC[19]
def main(args):
cudnn.benchmark = True
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Redirect print to both console and log file
if not args.evaluate:
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print('log_dir= ', args.logs_dir)
# Print logs
print('args= ', args)
saved_label = []
# Create data loaders
dataset, num_classes, query_loader, gallery_loader, propagate_loader, propagate_loader_ca = get_data(
args.data_dir, args.target, args.height, args.width, args.batch_size, args.re, args.workers)
# Create model
model = stb_net.MemoryBankModel(out_dim=2048, use_bnneck=args.use_bnneck)
# Create memory bank
cap_memory = CAPMemory(beta=args.inv_beta, alpha=args.inv_alpha, all_img_cams=dataset.target_train_all_img_cams)
# Set model
# model = nn.DataParallel(model.to(device))
model = model.to(device)
cap_memory = cap_memory.to(device)
print(args.load_ckpt)
# Load from checkpoint
if len(args.load_ckpt)>0:
print(' Loading pre-trained model: {}'.format(args.load_ckpt))
trained_dict = torch.load(args.load_ckpt)
filtered_trained_dict = {k: v for k, v in trained_dict.items() if not k.startswith('module.classifier')}
for k in filtered_trained_dict.keys():
if 'embeding' in k:
print('pretrained model has key= {}'.format(k))
model_dict = model.state_dict()
model_dict.update(filtered_trained_dict)
model.load_state_dict(model_dict)
# Evaluator
if args.evaluate:
print("Test:")
eval_results = test_model(model, query_loader, gallery_loader)
print('rank1: %.4f, rank5: %.4f, rank10: %.4f, rank20: %.4f, mAP: %.4f'
% (eval_results[1], eval_results[2], eval_results[3], eval_results[4], eval_results[0]))
return
# Optimizer
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = args.base_lr
weight_decay = args.weight_decay
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
optimizer = torch.optim.Adam(params)
lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.01, warmup_iters=10)
# Trainer
trainer = Trainer(model, cap_memory)
# Start training
    # track the number of pseudo-labelled samples per epoch (used to decide when the labels have converged)
all_len = []
for epoch in range(args.epochs):
lr_scheduler.step(epoch)
# image grouping
print('Epoch {} image grouping:'.format(epoch))
updated_label, init_intra_id_feat = img_association(model, propagate_loader_ca, min_sample=4,
eps=args.thresh, rerank=True, k1=20, k2=6, intra_id_reinitialize=True)
updated_len = len(updated_label[updated_label >= 0])
all_len.append(updated_len)
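        # Relative spread of the pseudo-labelled sample counts over (up to) the last five epochs, used as a convergence signal.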
ratio = (max(all_len[max(0, epoch-4):]) - min(all_len[max(0, epoch-4):])) / min(all_len[max(0, epoch-4):])
print(all_len)
print("Min last five: ", min(all_len[max(0, epoch-4):]))
print("Max last five: ", max(all_len[max(0, epoch-4):]))
print("Rat last five: ", ratio)
        if 0.001 < ratio < 0.01:
saved_label = updated_label
print("Max length ===>" + str(updated_len))
np.save(osp.join(args.logs_dir, str(epoch)+"label.npy"), saved_label)
# break
# all_len.append(updated_len)
# update train loader
new_train_loader, loader_size = update_train_loader(dataset, dataset.target_train, updated_label, args.height, args.width,
args.batch_size, args.re, args.workers, dataset.target_train_all_img_cams, sample_position=5)
num_batch = int(float(loader_size)/args.batch_size)
# train an epoch
trainer.train(epoch, new_train_loader, optimizer,
num_batch=num_batch, all_pseudo_label=torch.from_numpy(updated_label).to(torch.device('cuda')),
init_intra_id_feat=init_intra_id_feat)
# test
if (epoch+1)%5 == 0:
torch.save(model.state_dict(), osp.join(args.logs_dir, 'final_model_epoch_'+str(epoch+1)+'.pth'))
print('Epoch ' + str(epoch + 1)+ ' Model saved.')
print('Test with epoch {} model:'.format(epoch))
eval_results = test_model(model, query_loader, gallery_loader)
print(' rank1: %.4f, rank5: %.4f, rank10: %.4f, rank20: %.4f, mAP: %.4f'
% (eval_results[1], eval_results[2], eval_results[3], eval_results[4], eval_results[0]))
# save final model
if (epoch+1)%args.epochs == 0:
torch.save(model.state_dict(), osp.join(args.logs_dir, 'final_model_epoch_'+str(epoch+1)+'.pth'))
print('Final Model saved.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Camera Aware Proxies for Unsupervised Person Re-ID")
# target dataset
parser.add_argument('--target', type=str, default='market')
# imgs setting
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--height', type=int, default=288, help="input height, default: 288")
parser.add_argument('--width', type=int, default=144, help="input width, default: 144")
# random erasing
parser.add_argument('--re', type=float, default=0.5)
# model
parser.add_argument('--arch', type=str, default='resnet50', choices=models.names())
parser.add_argument('--features', type=int, default=2048)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--use_bnneck', action='store_true', default=True)
parser.add_argument('--pool_type', type=str, default='avgpool')
# optimizer
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--base_lr', type=float, default=0.00035) # for adam
parser.add_argument('--milestones',type=int, nargs='+', default=[20, 40]) # for adam
# training configs
parser.add_argument('--resume', type=str, default='', metavar='PATH')
parser.add_argument('--evaluate', action='store_true', help="evaluation only", default=False)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--print_freq', type=int, default=1)
# misc
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data_dir', type=str, metavar='PATH', default=osp.join(working_dir, 'data'))
parser.add_argument('--logs_dir', type=str, metavar='PATH', default=osp.join(working_dir, 'logs'))
parser.add_argument('--load_ckpt', type=str, default='')
# loss learning
parser.add_argument('--inv_alpha', type=float, default=0.2, help='update rate for the memory')
parser.add_argument('--inv_beta', type=float, default=0.07, help='temperature for contrastive loss')
    parser.add_argument('--thresh', type=float, default=0.5, help='threshold for clustering')
args = parser.parse_args()
# args.load_ckpt = ''
# args.evaluate = False
# args.use_bnneck = True
main(args)
# CUDA_VISIBLE_DEVICES=0 python train_cap.py --target 'VeRi' --data_dir '/home/xxx/folder/dataset' --logs_dir 'VeRi_logs'
| 43.469333 | 130 | 0.653886 |
4a1af489ef332b1169d5f2a65ae593761883ecb4
| 2,868 |
py
|
Python
|
examples/addressable/plot_pairing.py
|
d-michail/python-jheaps
|
65e1f4036b125dc065dc4398bf2eac0e95726f7a
|
[
"Apache-2.0"
] | null | null | null |
examples/addressable/plot_pairing.py
|
d-michail/python-jheaps
|
65e1f4036b125dc065dc4398bf2eac0e95726f7a
|
[
"Apache-2.0"
] | null | null | null |
examples/addressable/plot_pairing.py
|
d-michail/python-jheaps
|
65e1f4036b125dc065dc4398bf2eac0e95726f7a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Pairing Heap
============
In this example we create a pairing heap.
"""
# %%
# Start by importing the package.
import jheaps
# %%
# Create the pairing heap using the following factory method. By default all
# addressable heaps have `float` keys and `int` values. This can be adjusted by
# parameters `key_type` and `value_type` in the factory method which can take
# values `float`, `int`, `object`.
heap = jheaps.create_addressable_pairing_heap()
# %%
# Adding elements can be performed using insert. We next add an element with key
# equal to 3.14 and value 100. Moreover, we add a few more elements.
heap.insert(3.14, 100)
for i in range(1, 100):
heap.insert(i, 1000+i)
# %%
# Now our heap has 100 elements.
print ('Total elements so far: {}'.format(len(heap)))
# %%
# If we never need to refer to that element again, except possibly to access it when
# its key is the minimum in the heap, we are done. Otherwise, inserting an element
# returns a `handle` which we can later use to refer to that particular element.
handle1 = heap.insert(15.3, 200)
# %%
# Using the handle we can print the key and value of the element
print('key: {}, value: {}'.format(handle1.key, handle1.value))
# %%
# We can also adjust its value
handle1.value = 250
print('key: {}, value: {}'.format(handle1.key, handle1.value))
# %%
# Adjusting the key is more limited, as we can only increase an element's priority, i.e. decrease
# its key. We next find the minimum element in the heap and make the element referred to by
# `handle1` the new minimum.
cur_min_key = heap.find_min().key
handle1.decrease_key(cur_min_key - 1.0)
print('key: {}, value: {}'.format(handle1.key, handle1.value))
# %%
# Method `find_min` returns a handle to the current minimum element.
handle2 = heap.find_min()
print('key: {}, value: {}'.format(handle2.key, handle2.value))
# %%
# Method `delete_min` deletes the minimum element and returns a handle. Be
# careful with that handle as it allows you to read the key and read/write the
# value but calling any other method (such as `decrease_key` or `delete`) on that handle
# will raise an error.
handle3 = heap.delete_min()
print('key: {}, value: {}'.format(handle3.key, handle3.value))
# %%
# Apart from decreasing the key, handles are also useful when we want to delete an element
# which is not the element with the minimum key. We next insert an element and then remove it.
#
print('Size of heap before insertion: {}'.format(len(heap)))
handle4 = heap.insert(50.5, 103)
print('Size of heap after insertion: {}'.format(len(heap)))
handle4.delete()
print('Size of heap after deletion: {}'.format(len(heap)))
# %%
# Clearing the heap can be done using method `clear`.
heap.clear()
print('Size of heap: {}'.format(len(heap)))
print('Heap is empty: {}'.format(heap.is_empty()))
| 29.265306 | 96 | 0.703626 |
4a1af5b5d95dd5f188851af83e59e0a056cd8ace
| 2,551 |
py
|
Python
|
gdal/swig/python/samples/gdal_create_pdf.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 3,100 |
2015-01-02T10:33:40.000Z
|
2022-03-31T02:06:51.000Z
|
gdal/swig/python/samples/gdal_create_pdf.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 3,496 |
2015-01-06T16:53:30.000Z
|
2022-03-31T20:18:51.000Z
|
gdal/swig/python/samples/gdal_create_pdf.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 2,036 |
2015-01-08T20:22:12.000Z
|
2022-03-31T10:24:08.000Z
|
#!/usr/bin/env python3
###############################################################################
# $Id$
#
# Project: GDAL samples
# Purpose: Create a PDF from a XML composition file
# Author: Even Rouault <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2019, Even Rouault<even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import gdal
def Usage():
print('Usage: gdal_create_pdf composition.xml out.pdf')
return -1
def gdal_create_pdf(argv):
srcfile = None
targetfile = None
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return -1
for i in range(1, len(argv)):
if argv[i][0] == '-':
print('Unrecognized option : %s' % argv[i])
return Usage()
elif srcfile is None:
srcfile = argv[i]
elif targetfile is None:
targetfile = argv[i]
else:
print('Unexpected option : %s' % argv[i])
return Usage()
if srcfile is None or targetfile is None:
return Usage()
out_ds = gdal.GetDriverByName("PDF").Create(
targetfile, 0, 0, 0, gdal.GDT_Unknown,
options = ['COMPOSITION_FILE=' + srcfile])
return 0 if out_ds else 1
def main(argv):
gdal_create_pdf(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 33.565789 | 79 | 0.618581 |
4a1af670f8a09d280d6e32299623e0cec09043bc
| 1,410 |
py
|
Python
|
venv/Lib/site-packages/nbconvert/preprocessors/convertfigures.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 1,367 |
2015-05-24T22:59:22.000Z
|
2022-03-31T07:44:02.000Z
|
venv/Lib/site-packages/nbconvert/preprocessors/convertfigures.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 1,534 |
2015-05-08T21:47:15.000Z
|
2022-03-31T10:09:31.000Z
|
venv/Lib/site-packages/nbconvert/preprocessors/convertfigures.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 586 |
2015-05-17T01:21:02.000Z
|
2022-03-24T17:06:05.000Z
|
"""Module containing a preprocessor that converts outputs in the notebook from
one format to another.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from .base import Preprocessor
from traitlets import Unicode
class ConvertFiguresPreprocessor(Preprocessor):
"""
Converts all of the outputs in a notebook from one format to another.
"""
from_format = Unicode(help='Format the converter accepts').tag(config=True)
to_format = Unicode(help='Format the converter writes').tag(config=True)
def __init__(self, **kw):
"""
Public constructor
"""
super().__init__(**kw)
def convert_figure(self, data_format, data):
raise NotImplementedError()
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each cell,
See base.py
"""
# Loop through all of the datatypes of the outputs in the cell.
for output in cell.get('outputs', []):
if output.output_type in {'execute_result', 'display_data'} \
and self.from_format in output.data \
and self.to_format not in output.data:
output.data[self.to_format] = self.convert_figure(
self.from_format, output.data[self.from_format])
return cell, resources
| 28.77551 | 79 | 0.641844 |
4a1af6d2cced2568784ab2693ef705eaf738a125
| 357 |
py
|
Python
|
setup.py
|
JKamlah/akf-dbTools
|
bd7b661485ba578b0a3e8ae6d1a0e1601b5dc0d6
|
[
"Apache-2.0"
] | 1 |
2019-02-26T22:19:08.000Z
|
2019-02-26T22:19:08.000Z
|
setup.py
|
JKamlah/akf-dbTools
|
bd7b661485ba578b0a3e8ae6d1a0e1601b5dc0d6
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
JKamlah/akf-dbTools
|
bd7b661485ba578b0a3e8ae6d1a0e1601b5dc0d6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3.6
import sys
assert sys.version_info[0]==3 and sys.version_info[1]>=6,\
"Ýou must install and use akf-dbTools with Python version 3.6 or higher"
from distutils.core import setup
setup(
name='akf_dbTools',
version='1.0',
author='jkamlah',
description='Tools for the Aktienfuehrer Database',
packages=[''],
)
| 22.3125 | 76 | 0.691877 |
4a1af799308031e8c1a4337b9d991cb6bafb489f
| 4,801 |
py
|
Python
|
docs/conf.py
|
NullConvergence/pydriller
|
e75cbe1ce90e1d317973915cf79ed03fb09988a0
|
[
"Apache-2.0"
] | 583 |
2018-04-09T09:48:47.000Z
|
2022-03-23T17:27:10.000Z
|
docs/conf.py
|
NullConvergence/pydriller
|
e75cbe1ce90e1d317973915cf79ed03fb09988a0
|
[
"Apache-2.0"
] | 195 |
2018-05-25T08:10:58.000Z
|
2022-03-29T09:28:37.000Z
|
docs/conf.py
|
NullConvergence/pydriller
|
e75cbe1ce90e1d317973915cf79ed03fb09988a0
|
[
"Apache-2.0"
] | 134 |
2018-04-10T12:57:34.000Z
|
2022-03-29T13:40:35.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'PyDriller'
copyright = '2018, Davide Spadini'
author = 'Davide Spadini'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyDrillerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyDriller.tex', 'PyDriller Documentation',
'Davide Spadini', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pydriller', 'PyDriller Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyDriller', 'PyDriller Documentation',
author, 'PyDriller', 'One line description of project.',
'Miscellaneous'),
]
| 31.175325 | 79 | 0.655905 |
4a1af7c9f88360ce23709ca597d7f49be82921e3
| 3,707 |
py
|
Python
|
tests/test_connection.py
|
hulu316/zoonado
|
cb06102f95e7da9c0e418bb9e327045e012a1497
|
[
"Apache-2.0"
] | 12 |
2016-04-14T09:55:38.000Z
|
2018-01-07T13:12:47.000Z
|
tests/test_connection.py
|
hulu316/zoonado
|
cb06102f95e7da9c0e418bb9e327045e012a1497
|
[
"Apache-2.0"
] | 16 |
2016-07-21T09:45:38.000Z
|
2017-09-22T19:06:14.000Z
|
tests/test_connection.py
|
hulu316/zoonado
|
cb06102f95e7da9c0e418bb9e327045e012a1497
|
[
"Apache-2.0"
] | 8 |
2016-07-21T09:06:37.000Z
|
2019-07-26T05:48:00.000Z
|
from mock import patch, Mock, call
from tornado import testing, concurrent
from zoonado.protocol.connect import ConnectRequest, ConnectResponse
from zoonado import connection
class ConnectionTests(testing.AsyncTestCase):
def setUp(self):
super(ConnectionTests, self).setUp()
self.response_buffer = bytearray()
tcpclient_patcher = patch.object(connection, "tcpclient")
mock_tcpclient = tcpclient_patcher.start()
self.addCleanup(tcpclient_patcher.stop)
self.mock_client = mock_tcpclient.TCPClient.return_value
stream = Mock()
self.mock_client.connect.return_value = self.future_value(stream)
def read_some(num_bytes):
result = self.response_buffer[:num_bytes]
del self.response_buffer[:num_bytes]
return self.future_value(result)
def read_all():
result = self.response_buffer[:]
self.response_buffer = bytearray()
return self.future_value(result)
stream.write.return_value = self.future_value(None)
stream.read_bytes.side_effect = read_some
stream.read_until_close.side_effect = read_all
def future_value(self, value):
f = concurrent.Future()
f.set_result(value)
return f
def future_error(self, exception):
f = concurrent.Future()
f.set_exception(exception)
return f
@testing.gen_test
def test_connect_gets_version_info(self):
self.response_buffer.extend(
b"""Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT
Latency min/avg/max: 0/0/1137
Received: 21462
Sent: 21474
Connections: 2
Outstanding: 0
Zxid: 0x11171
Mode: standalone
Node count: 232"""
)
conn = connection.Connection("local", 9999, Mock())
yield conn.connect()
self.assertEqual(conn.version_info, (3, 4, 6))
self.assertEqual(conn.start_read_only, False)
self.mock_client.connect.assert_has_calls([
call("local", 9999),
call("local", 9999),
])
@testing.gen_test
def test_send_connect_returns_none_on_error(self):
conn = connection.Connection("local", 9999, Mock())
conn.stream = Mock()
conn.stream.write.return_value = self.future_value(None)
conn.stream.read_bytes.return_value = self.future_error(Exception("!"))
result = yield conn.send_connect(
ConnectRequest(
protocol_version=0,
last_seen_zxid=0,
timeout=8000,
session_id=0,
password=b'\x00',
read_only=False,
)
)
self.assertEqual(result, None)
@patch.object(connection.Connection, "read_response")
@testing.gen_test
def test_send_connect(self, read_response):
conn = connection.Connection("local", 9999, Mock())
conn.stream = Mock()
conn.stream.write.return_value = self.future_value(None)
response = ConnectResponse(
protocol_version=0,
timeout=7000,
session_id=123456,
password=b"\xc3"
)
read_response.return_value = self.future_value(
(
23, # xid
123, # zxid
response, # response
)
)
result = yield conn.send_connect(
ConnectRequest(
protocol_version=0,
last_seen_zxid=0,
timeout=8000,
session_id=0,
password=b'\x00',
read_only=False,
)
)
self.assertEqual(result, (123, response))
| 28.083333 | 79 | 0.602913 |
4a1af7e7bc6853ffea4aeb34caa943077cb20582
| 3,215 |
py
|
Python
|
examples/pipelines/multiple_subjects_bold.py
|
salma1601/process-asl-old
|
eaafb0c46b132be14005b52eb54b86d30364ec32
|
[
"BSD-3-Clause"
] | 1 |
2020-03-11T13:59:04.000Z
|
2020-03-11T13:59:04.000Z
|
examples/pipelines/multiple_subjects_bold.py
|
salma1601/process-asl-old
|
eaafb0c46b132be14005b52eb54b86d30364ec32
|
[
"BSD-3-Clause"
] | 4 |
2016-02-19T18:24:50.000Z
|
2016-02-19T18:24:58.000Z
|
examples/pipelines/multiple_subjects_bold.py
|
salma1601/process-asl-old
|
eaafb0c46b132be14005b52eb54b86d30364ec32
|
[
"BSD-3-Clause"
] | null | null | null |
"""
===============================
Multiple subjects pipeline demo
===============================
A basic multiple subjects pipeline for BOLD data.
"""
import os
import matplotlib.pylab as plt
import nipype.interfaces.spm as spm
from nipype.caching import Memory
from nilearn import plotting
from procasl import datasets, preprocessing, _utils
# Load the dataset
subjects_parent_directory = os.path.join(os.path.expanduser('~/procasl_data'),
'heroes')
heroes = datasets.load_heroes_dataset(
subjects=(0, 4, 9),
subjects_parent_directory=subjects_parent_directory,
dataset_pattern={'anat': 't1mri/acquisition1/anat*.nii',
'BOLD EPI': 'fMRI/acquisition1/vismot1_BOLDepi*.nii'})
current_directory = os.getcwd()
# Loop over subjects
for (func_file, anat_file) in zip(
heroes['BOLD EPI'], heroes['anat']):
# Create a memory context
subject_directory = os.path.relpath(anat_file, subjects_parent_directory)
subject_directory = subject_directory.split(os.sep)[0]
cache_directory = os.path.join(os.path.expanduser('~/CODE/process-asl'),
'procasl_cache', 'heroes',
subject_directory)
if not os.path.exists(cache_directory):
os.mkdir(cache_directory)
os.chdir(cache_directory) # nipype saves .m scripts in current directory
mem = Memory(cache_directory)
# Realign EPIs
realign = mem.cache(spm.Realign)
out_realign = realign(
in_files=func_file,
register_to_mean=True)
# Coregister anat to mean EPIs
coregister = mem.cache(spm.Coregister)
out_coregister = coregister(
target=out_realign.outputs.mean_image,
source=anat_file,
write_interp=3,
jobtype='estimate')
# Segment anat
segment = mem.cache(spm.Segment)
out_segment = segment(
data=anat_file,
gm_output_type=[True, False, True],
wm_output_type=[True, False, True],
csf_output_type=[True, False, True],
save_bias_corrected=True)
# Normalize anat
normalize_anat = mem.cache(spm.Normalize)
out_normalize_anat = normalize_anat(
parameter_file=out_segment.outputs.transformation_mat,
apply_to_files=[out_coregister.outputs.coregistered_source],
write_voxel_sizes=_utils.get_vox_dims(anat_file),
write_interp=1,
jobtype='write')
# Normalize EPIs
normalize_func = mem.cache(spm.Normalize)
out_normalize_func = normalize_func(
parameter_file=out_segment.outputs.transformation_mat,
apply_to_files=[out_realign.outputs.realigned_files],
write_voxel_sizes=_utils.get_vox_dims(func_file),
write_interp=1,
jobtype='write')
# Smooth EPIs
smooth = mem.cache(spm.Smooth)
out_smooth = smooth(
in_files=out_normalize_func.outputs.normalized_files,
fwhm=[5., 5., 5.])
# Plot mean smoothed EPI
average = preprocessing.Average()
average.inputs.in_file = out_smooth.outputs.smoothed_files
out_average = average.run()
plotting.plot_epi(out_average.outputs.mean_image)
plt.show()
os.chdir(current_directory)
| 32.806122 | 78 | 0.669673 |
4a1af8ca2063597c35f3eba72a5bbbf162a4176f
| 633 |
py
|
Python
|
manage.py
|
anonshubh/eCommerce-rostores-
|
7503e855d650556e216c42fc1c5b95a42bb9c501
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
anonshubh/eCommerce-rostores-
|
7503e855d650556e216c42fc1c5b95a42bb9c501
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
anonshubh/eCommerce-rostores-
|
7503e855d650556e216c42fc1c5b95a42bb9c501
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecommerce_src.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727 | 77 | 0.685624 |
4a1af93dd899de187ed6c5fef3691c8c6f5554f5
| 5,600 |
py
|
Python
|
mmdet3d/models/model_utils/transformer.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 2,216 |
2020-07-09T19:10:11.000Z
|
2022-03-31T12:39:26.000Z
|
mmdet3d/models/model_utils/transformer.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 1,174 |
2020-07-10T07:02:28.000Z
|
2022-03-31T12:38:56.000Z
|
mmdet3d/models/model_utils/transformer.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 681 |
2020-07-09T19:40:06.000Z
|
2022-03-31T11:02:24.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks.registry import ATTENTION
from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING, MultiheadAttention
from torch import nn as nn
@ATTENTION.register_module()
class GroupFree3DMHA(MultiheadAttention):
"""A warpper for torch.nn.MultiheadAttention for GroupFree3D.
This module implements MultiheadAttention with identity connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
attn_drop (float): A Dropout layer on attn_output_weights. Default 0.0.
proj_drop (float): A Dropout layer. Default 0.0.
dropout_layer (obj:`ConfigDict`): The dropout_layer used
when adding the shortcut.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
batch_first (bool): Whether Key, Query and Value are shaped as
    (batch, n, embed_dim) or (n, batch, embed_dim). Defaults to False.
"""
def __init__(self,
embed_dims,
num_heads,
attn_drop=0.,
proj_drop=0.,
dropout_layer=dict(type='DropOut', drop_prob=0.),
init_cfg=None,
batch_first=False,
**kwargs):
super().__init__(embed_dims, num_heads, attn_drop, proj_drop,
dropout_layer, init_cfg, batch_first, **kwargs)
def forward(self,
query,
key,
value,
identity,
query_pos=None,
key_pos=None,
attn_mask=None,
key_padding_mask=None,
**kwargs):
"""Forward function for `GroupFree3DMHA`.
**kwargs allow passing a more general data flow when combining
with other operations in `transformerlayer`.
Args:
query (Tensor): The input query with shape [num_queries, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
key (Tensor): The key tensor with shape [num_keys, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
If None, the ``query`` will be used. Defaults to None.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Defaults to None.
If None, the `key` will be used.
identity (Tensor): This tensor, with the same shape as x,
will be used for the identity link.
If None, `x` will be used. Defaults to None.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. If not None, it will
be added to `x` before forward function. Defaults to None.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Defaults to None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`. Defaults to None.
attn_mask (Tensor): ByteTensor mask with shape [num_queries,
num_keys]. Same in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
Same in `nn.MultiheadAttention.forward`. Defaults to None.
Returns:
Tensor: forwarded results with shape [num_queries, bs, embed_dims].
"""
if hasattr(self, 'operation_name'):
if self.operation_name == 'self_attn':
value = value + query_pos
elif self.operation_name == 'cross_attn':
value = value + key_pos
else:
raise NotImplementedError(
f'{self.__class__.__name__} '
f"can't be used as {self.operation_name}")
else:
value = value + query_pos
return super(GroupFree3DMHA, self).forward(
query=query,
key=key,
value=value,
identity=identity,
query_pos=query_pos,
key_pos=key_pos,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
**kwargs)
@POSITIONAL_ENCODING.register_module()
class ConvBNPositionalEncoding(nn.Module):
"""Absolute position embedding with Conv learning.
Args:
input_channel (int): input features dim.
num_pos_feats (int): output position features dim.
Defaults to 288 to be consistent with seed features dim.
"""
def __init__(self, input_channel, num_pos_feats=288):
super().__init__()
self.position_embedding_head = nn.Sequential(
nn.Conv1d(input_channel, num_pos_feats, kernel_size=1),
nn.BatchNorm1d(num_pos_feats), nn.ReLU(inplace=True),
nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1))
def forward(self, xyz):
"""Forward pass.
Args:
xyz (Tensor): (B, N, 3) the coordinates to embed.
Returns:
Tensor: (B, num_pos_feats, N) the embedded position features.
"""
xyz = xyz.permute(0, 2, 1)
position_embedding = self.position_embedding_head(xyz)
return position_embedding
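# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of ConvBNPositionalEncoding: it maps a
# (B, N, 3) tensor of seed coordinates to (B, num_pos_feats, N) positional
# features. The batch size, number of seeds and feature dim below are
# arbitrary values chosen only for illustration.
def _example_conv_bn_positional_encoding():
    import torch
    pos_enc = ConvBNPositionalEncoding(input_channel=3, num_pos_feats=288)
    xyz = torch.rand(2, 1024, 3)  # 2 point sets with 1024 seed points each
    features = pos_enc(xyz)
    assert features.shape == (2, 288, 1024)
    return features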
| 40.28777 | 79 | 0.591607 |
4a1af94e9c3ceea7bf149d4bea4c94ab708cbe53
| 911 |
py
|
Python
|
problems/tests/test_container_with_most_water.py
|
vinta/fuck-coding-interviews
|
915ff55963430e81134a35f65f511e5684c52f11
|
[
"MIT"
] | 590 |
2020-06-17T08:26:47.000Z
|
2022-03-30T18:47:32.000Z
|
problems/tests/test_container_with_most_water.py
|
parvathirajan/fuck-coding-interviews
|
915ff55963430e81134a35f65f511e5684c52f11
|
[
"MIT"
] | 12 |
2020-07-14T09:24:32.000Z
|
2020-11-02T03:43:47.000Z
|
problems/tests/test_container_with_most_water.py
|
parvathirajan/fuck-coding-interviews
|
915ff55963430e81134a35f65f511e5684c52f11
|
[
"MIT"
] | 75 |
2020-07-29T06:50:13.000Z
|
2022-03-13T16:14:57.000Z
|
# coding: utf-8
import unittest
from problems.container_with_most_water import Solution
from problems.container_with_most_water import Solution2
class TestCase(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def test(self):
self.assertEqual(self.solution.maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]), 49)
self.assertEqual(self.solution.maxArea([2, 3, 4, 5, 18, 17, 6]), 17)
self.assertEqual(self.solution.maxArea([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), 25)
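        # For [1, 8, 6, 2, 5, 4, 8, 3, 7] the best container uses the 8 at
        # index 1 and the 7 at index 8: area = min(8, 7) * (8 - 1) = 49.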
class TestCase2(unittest.TestCase):
def setUp(self):
self.solution = Solution2()
def test(self):
self.assertEqual(self.solution.maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]), 49)
self.assertEqual(self.solution.maxArea([2, 3, 4, 5, 18, 17, 6]), 17)
self.assertEqual(self.solution.maxArea([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]), 25)
if __name__ == '__main__':
unittest.main()
| 30.366667 | 84 | 0.63337 |
4a1af99257d748ac15c2663d24dcc4f3108006b0
| 7,623 |
py
|
Python
|
servicestack/utils.py
|
ServiceStack/servicestack-python
|
57577d94e9c32334c9604e5881f9f96b93a0fddb
|
[
"BSD-3-Clause"
] | 3 |
2021-07-03T17:50:05.000Z
|
2021-07-12T17:08:33.000Z
|
servicestack/utils.py
|
ServiceStack/servicestack-python
|
57577d94e9c32334c9604e5881f9f96b93a0fddb
|
[
"BSD-3-Clause"
] | null | null | null |
servicestack/utils.py
|
ServiceStack/servicestack-python
|
57577d94e9c32334c9604e5881f9f96b93a0fddb
|
[
"BSD-3-Clause"
] | null | null | null |
import base64
import json
import re
from datetime import datetime, timezone, timedelta
from typing import Optional, Any
from .log import Log
def lowercase(string): return str(string).lower()
def uppercase(string): return str(string).upper()
def snakecase(string):
string = re.sub(r"[\-\.\s]", '_', str(string))
if not string:
return string
return lowercase(string[0]) + re.sub(r"[A-Z]", lambda matched: '_' + lowercase(matched.group(0)), string[1:])
def camelcase(string):
string = re.sub(r"\w[\s\W]+\w", '', str(string))
if not string:
return string
return lowercase(string[0]) + re.sub(r"[\-_\.\s]([a-z])", lambda matched: uppercase(matched.group(1)), string[1:])
def capitalcase(string: str):
string = str(string)
if not string:
return string
return uppercase(string[0]) + string[1:]
def pascalcase(string: str): return capitalcase(camelcase(string))
def titlecase(string): return ' '.join([capitalcase(word) for word in snakecase(string).split("_")])
def clean_camelcase(key: str):
use_key = camelcase(key)
if use_key[-1] == '_':
use_key = use_key[0:-1]
return use_key
def ex_message(e: Exception):
if hasattr(e, 'message'):
return e.message
return str(e)
def log(o: Any):
print(o)
return o
def index_of(target: str, needle: str):
try:
return target.index(needle)
except ValueError:
return -1
def last_index_of(target: str, needle: str):
try:
return target.rindex(needle)
except ValueError:
return -1
def left_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = index_of(str_val, needle)
return str_val if pos == -1 else str_val[:pos]
def right_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = index_of(str_val, needle)
return str_val if pos == -1 else str_val[pos + len(needle):]
def last_left_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = last_index_of(str_val, needle)
return str_val if pos == -1 else str_val[:pos]
def last_right_part(str_val: Optional[str], needle: str):
if str_val is None:
return None
pos = last_index_of(str_val, needle)
return str_val if pos == -1 else str_val[pos + len(needle):]
def split_on_first(s: Optional[str], c: str):
    if s is None or s == "":
return [s]
pos = index_of(s, c)
if pos >= 0:
return [s[:pos], s[pos + 1:]]
return [s]
def split_on_last(s: Optional[str], c: str):
    if s is None or s == "":
return [s]
pos = last_index_of(s, c)
if pos >= 0:
return [s[:pos], s[pos + 1:]]
return [s]
def to_timespan(duration: timedelta):
total_seconds = duration.total_seconds()
whole_seconds = total_seconds // 1
seconds = whole_seconds
sec = int(seconds % 60 if seconds >= 60 else seconds)
seconds = seconds // 60
min = int(seconds % 60)
seconds = seconds // 60
    hours = int(seconds % 24)  # seconds now counts whole hours
    days = int(seconds // 24)
remaining_secs = float(sec + (total_seconds - whole_seconds))
sb = ["P"]
if days > 0:
sb.append(f"{days}D")
if days == 0 or hours + min + sec + remaining_secs > 0:
sb.append("T")
if hours > 0:
sb.append(f"{hours}H")
if min > 0:
sb.append(f"{min}M")
if remaining_secs > 0:
sec_fmt = "{:.7f}".format(remaining_secs)
sec_fmt = sec_fmt.rstrip('0')
sec_fmt = sec_fmt.rstrip('.')
sb.append(sec_fmt)
sb.append("S")
elif len(sb) == 2: # PT
sb.append("0S")
xsd = ''.join(sb)
# print(f"XSD: {xsd}, {days}:{hours}:{min}:{remaining_secs}")
return xsd
def from_timespan(s: Optional[str]):
if s is None:
return None
days = 0
hours = 0
minutes = 0
seconds = 0
ms = 0.0
if s[0] != "P":
raise ValueError(f"{s} is not a valid XSD Duration")
s = s[1:] # strip P
t = split_on_first(s, 'T')
has_time = len(t) == 2
d = split_on_first(t[0], 'D')
if len(d) == 2:
days = int(d[0])
if has_time:
h = split_on_first(t[1], 'H')
if len(h) == 2:
hours = int(h[0])
m = split_on_first(h[len(h) - 1], 'M')
if len(m) == 2:
minutes = int(m[0])
s = split_on_first(m[len(m) - 1], 'S')
if len(s) == 2:
ms = float(s[0])
seconds = int(ms)
ms -= seconds
# print(f"\n\ntimedelta({str})[{has_time}] = {hours}:{minutes}:{seconds}\n\n")
return timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds, milliseconds=int(ms * 1000))
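# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round-trip between timedelta and the XSD duration string form,
# assuming the day/hour arithmetic above; e.g. 1 day, 2 hours and 30.5 s
# serializes roughly as "P1DT2H30.5S".
def _example_timespan_roundtrip():
    duration = timedelta(days=1, hours=2, seconds=30, milliseconds=500)
    xsd = to_timespan(duration)
    return xsd, from_timespan(xsd) == duration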
_MIN_UTC_DATE = datetime.min.replace(tzinfo=timezone.utc)
_MIN_EPOCH = _MIN_UTC_DATE.timestamp()
_MAX_UTC_DATE = datetime.max.replace(tzinfo=timezone.utc)
def to_datetime(date: datetime):
try:
return f"/Date({int(date.timestamp() * 1000)})/"
except Exception as e:
Log.debug(f"to_datetime({date}): e")
return None
def from_datetime(json_date: str):
if json_date.startswith("/Date("):
epoch_and_zone = left_part(right_part(json_date, "("), ")")
epoch_str = epoch_and_zone
if index_of(epoch_and_zone[1:], '-') >= 0:
epoch_str = last_left_part(epoch_and_zone, '-')
if index_of(epoch_and_zone[1:], '+') >= 0:
epoch_str = last_left_part(epoch_and_zone, '+')
epoch = int(epoch_str)
try:
return datetime.fromtimestamp(epoch / 1000, timezone.utc)
except Exception as e:
if epoch < _MIN_EPOCH:
return _MIN_UTC_DATE
else:
return _MAX_UTC_DATE
# need to reduce to 6f precision and remove trailing Z
has_sec_fraction = index_of(json_date, '.') >= 0
is_utc = json_date.endswith('Z')
if is_utc:
json_date = json_date[0:-1]
if has_sec_fraction:
sec_fraction = last_right_part(json_date, '.')
tz = ''
if '+' in sec_fraction:
tz = '+' + right_part(sec_fraction, '+')
sec_fraction = left_part(sec_fraction, '+')
elif '-' in sec_fraction:
sec_fraction = left_part(sec_fraction, '-')
if len(sec_fraction) > 6:
json_date = last_left_part(json_date, '.') + '.' + sec_fraction[0:6] + tz
if is_utc:
return datetime.fromisoformat(json_date).replace(tzinfo=timezone.utc)
else:
return datetime.fromisoformat(json_date)
def to_bytearray(value: Optional[bytes]):
if value is None:
return None
return base64.b64encode(value).decode('ascii')
def from_bytearray(base64str: Optional[str]):
return base64.b64decode(base64str)
def from_base64url_safe(input_str: str):
output = input_str
output = output.replace('-', '+')
output = output.replace('_', '/')
pad = len(output) % 4
if pad == 2:
output += "=="
elif pad == 3:
output += "="
elif pad != 0:
raise ValueError("Illegal base46url string!")
return base64.b64decode(output)
def _decode_base64url_payload(payload: str):
payload_bytes = from_base64url_safe(payload)
payload_json = payload_bytes.decode('utf-8')
return json.loads(payload_json)
def inspect_jwt(jwt: str):
head = _decode_base64url_payload(left_part(jwt, '.'))
body = _decode_base64url_payload(left_part(right_part(jwt, '.'), '.'))
exp = int(body['exp'])
return head, body, datetime.fromtimestamp(exp, timezone.utc)
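# --- Illustrative usage sketch (not part of the original module) ---
# Builds a throwaway, unsigned JWT purely to show what inspect_jwt returns:
# the decoded header, the decoded claims and the expiry as a datetime. The
# claims used here are made up for the example.
def _example_inspect_jwt():
    import time
    header = base64.urlsafe_b64encode(b'{"alg":"HS256","typ":"JWT"}').decode().rstrip('=')
    body = json.dumps({"sub": "demo-user", "exp": int(time.time()) + 3600}).encode()
    payload = base64.urlsafe_b64encode(body).decode().rstrip('=')
    head, claims, expires_at = inspect_jwt(f"{header}.{payload}.signature")
    return head["alg"], claims["sub"], expires_at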
| 26.747368 | 118 | 0.599239 |
4a1afa3059a4ff0d3f5701a8c02d7add860eb0bd
| 1,094 |
py
|
Python
|
transform_pdf2image/__init__.py
|
SilasPDJ/maeportifolios_desktop_etc
|
a341648c8161251d42055155f6fd99fd388d9f2d
|
[
"MIT"
] | null | null | null |
transform_pdf2image/__init__.py
|
SilasPDJ/maeportifolios_desktop_etc
|
a341648c8161251d42055155f6fd99fd388d9f2d
|
[
"MIT"
] | null | null | null |
transform_pdf2image/__init__.py
|
SilasPDJ/maeportifolios_desktop_etc
|
a341648c8161251d42055155f6fd99fd388d9f2d
|
[
"MIT"
] | null | null | null |
from defs_utils import *
import pdf2image
import os
def transforma_pdf_em_img_por_materia(materia, pdf_path=None):
searched = materia
if pdf_path:
list_files = list_dir(complete_name(searched, pre=pdf_path), True)
else:
list_files = list_dir(complete_name(searched), True)
volta = os.getcwd()
for file in list_files:
pages = pdf2image.convert_from_path(file)
print(file)
os.chdir(volta)
for e, page in enumerate(pages):
e_cont = e+1
dir_name = '../MATERIAS_CRIA_FILES'
dir_name += '\\'+searched+'\\'
dir_name += file.split('\\')[-1].split('-')[0]
for folder in dir_name.split('\\'):
try:
os.chdir(folder)
except (FileNotFoundError):
os.mkdir(folder)
os.chdir(folder)
os.chdir(volta)
real = '\\'.join(os.path.realpath(__file__).split('\\')[:-1])
page.save(f'{real}\\{dir_name}\\out-{e_cont}.jpg', 'JPEG')
print(dir_name)
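# --- Illustrative usage sketch (not part of the original module) ---
# The subject name and PDF directory below are placeholders only:
# transforma_pdf_em_img_por_materia('Matematica', pdf_path=r'C:\meus_pdfs')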
| 30.388889 | 74 | 0.546618 |
4a1afb0f9ebda44be292a02b42237425cbdf6b60
| 5,465 |
py
|
Python
|
mediagoblin/tools/exif.py
|
stenwt/mediagoblin-quickstart-openshift
|
4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2
|
[
"CC0-1.0"
] | 1 |
2016-02-10T18:22:42.000Z
|
2016-02-10T18:22:42.000Z
|
mediagoblin/tools/exif.py
|
stenwt/mediagoblin-quickstart-openshift
|
4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2
|
[
"CC0-1.0"
] | 1 |
2016-04-19T13:03:17.000Z
|
2016-04-19T13:03:17.000Z
|
mediagoblin/tools/exif.py
|
stenwt/mediagoblin-quickstart-openshift
|
4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2
|
[
"CC0-1.0"
] | null | null | null |
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.tools.extlib.EXIF import process_file, Ratio
from mediagoblin.processing import BadMediaFail
from mediagoblin.tools.translate import pass_to_ugettext as _
# A list of tags that should be stored for faster access
USEFUL_TAGS = [
'Image Make',
'Image Model',
'EXIF FNumber',
'EXIF Flash',
'EXIF FocalLength',
'EXIF ExposureTime',
'EXIF ApertureValue',
'EXIF ExposureMode',
'EXIF ISOSpeedRatings',
'EXIF UserComment',
]
def exif_image_needs_rotation(exif_tags):
"""
Returns True if EXIF orientation requires rotation
"""
return 'Image Orientation' in exif_tags \
and exif_tags['Image Orientation'].values[0] != 1
def exif_fix_image_orientation(im, exif_tags):
"""
Translate any EXIF orientation to raw orientation
Cons:
    - REDUCES IMAGE QUALITY by recompressing it
Pros:
- Prevents neck pain
"""
# Rotate image
if 'Image Orientation' in exif_tags:
rotation_map = {
3: 180,
6: 270,
8: 90}
orientation = exif_tags['Image Orientation'].values[0]
if orientation in rotation_map.keys():
im = im.rotate(
rotation_map[orientation])
return im
def extract_exif(filename):
"""
Returns EXIF tags found in file at ``filename``
"""
exif_tags = {}
try:
        image = open(filename, 'rb')  # EXIF parsing needs the raw bytes
exif_tags = process_file(image, details=False)
except IOError:
raise BadMediaFail(_('Could not read the image file.'))
return exif_tags
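# --- Illustrative usage sketch (not part of the original module) ---
# A typical flow with the helpers above (the arguments are placeholders):
# read the tags once, rotate only when the orientation tag requires it,
# then keep the cleaned, database-friendly subset.
def _example_process_image(image, filename):
    tags = extract_exif(filename)
    if exif_image_needs_rotation(tags):
        image = exif_fix_image_orientation(image, tags)
    return image, get_useful(clean_exif(tags))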
def clean_exif(exif):
'''
Clean the result from anything the database cannot handle
'''
# Discard any JPEG thumbnail, for database compatibility
# and that I cannot see a case when we would use it.
# It takes up some space too.
disabled_tags = [
'Thumbnail JPEGInterchangeFormatLength',
'JPEGThumbnail',
'Thumbnail JPEGInterchangeFormat']
clean_exif = {}
for key, value in exif.items():
        if key not in disabled_tags:
clean_exif[key] = _ifd_tag_to_dict(value)
return clean_exif
def _ifd_tag_to_dict(tag):
'''
Takes an IFD tag object from the EXIF library and converts it to a dict
that can be stored as JSON in the database.
'''
data = {
'printable': tag.printable,
'tag': tag.tag,
'field_type': tag.field_type,
'field_offset': tag.field_offset,
'field_length': tag.field_length,
'values': None}
if isinstance(tag.printable, str):
# Force it to be decoded as UTF-8 so that it'll fit into the DB
data['printable'] = tag.printable.decode('utf8', 'replace')
if type(tag.values) == list:
data['values'] = []
for val in tag.values:
if isinstance(val, Ratio):
data['values'].append(
_ratio_to_list(val))
else:
data['values'].append(val)
else:
if isinstance(tag.values, str):
# Force UTF-8, so that it fits into the DB
data['values'] = tag.values.decode('utf8', 'replace')
else:
data['values'] = tag.values
return data
def _ratio_to_list(ratio):
return [ratio.num, ratio.den]
def get_useful(tags):
useful = {}
for key, tag in tags.items():
if key in USEFUL_TAGS:
useful[key] = tag
return useful
def get_gps_data(tags):
"""
Processes EXIF data returned by EXIF.py
"""
gps_data = {}
    if 'Image GPSInfo' not in tags:
return gps_data
try:
dms_data = {
'latitude': tags['GPS GPSLatitude'],
'longitude': tags['GPS GPSLongitude']}
for key, dat in dms_data.items():
gps_data[key] = (
lambda v:
float(v[0].num) / float(v[0].den) \
+ (float(v[1].num) / float(v[1].den) / 60) \
+ (float(v[2].num) / float(v[2].den) / (60 * 60))
)(dat.values)
if tags['GPS GPSLatitudeRef'].values == 'S':
gps_data['latitude'] /= -1
if tags['GPS GPSLongitudeRef'].values == 'W':
gps_data['longitude'] /= -1
except KeyError:
pass
try:
gps_data['direction'] = (
lambda d:
float(d.num) / float(d.den)
)(tags['GPS GPSImgDirection'].values[0])
except KeyError:
pass
try:
gps_data['altitude'] = (
lambda a:
float(a.num) / float(a.den)
)(tags['GPS GPSAltitude'].values[0])
except KeyError:
pass
return gps_data
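# Note on the conversion above (illustrative numbers): a DMS reading of
# 52 deg 31' 12" becomes 52 + 31/60 + 12/3600 = 52.52 decimal degrees, and
# the sign is flipped for southern latitudes and western longitudes.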
| 27.462312 | 77 | 0.602013 |
4a1afb13d9994c623e04975e34046bc4e82b850a
| 9,783 |
py
|
Python
|
syft/grid/private_grid.py
|
shubhamsingh987/PySyft
|
ff967e3735bd7d47667d1d3e5038ba1493ca2e90
|
[
"Apache-2.0"
] | 1 |
2020-05-25T13:44:29.000Z
|
2020-05-25T13:44:29.000Z
|
syft/grid/private_grid.py
|
shubhamsingh987/PySyft
|
ff967e3735bd7d47667d1d3e5038ba1493ca2e90
|
[
"Apache-2.0"
] | 2 |
2020-03-09T09:17:06.000Z
|
2020-04-09T13:33:12.000Z
|
syft/grid/private_grid.py
|
shubhamsingh987/PySyft
|
ff967e3735bd7d47667d1d3e5038ba1493ca2e90
|
[
"Apache-2.0"
] | null | null | null |
import random
import torch
from typing import Any
from typing import Tuple
from typing import Dict
from typing import Union
# Syft imports
import syft
from syft.grid.abstract_grid import AbstractGrid
from syft.workers.node_client import NodeClient
from syft.execution.plan import Plan
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
class PrivateGridNetwork(AbstractGrid):
def __init__(self, *workers):
super().__init__()
self.workers = list(workers)
self._connect_all_nodes(self.workers, NodeClient)
def search(self, *query) -> Dict[Any, Any]:
""" Searches over a collection of workers, returning pointers to the results
grouped by worker.
Args:
query : List of tags used to identify the desired tensor.
Returns:
            results : Dict mapping each worker id to the pointers that match the tags.
"""
results = {}
for worker in self.workers:
worker_results = syft.local_worker.request_search(query, location=worker)
if len(worker_results) > 0:
results[worker.id] = worker_results
return results
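    # --- Illustrative usage sketch (not part of the original class) ---
    # Hypothetical workers and tags, only to show the expected call shape:
    #   grid = PrivateGridNetwork(alice, bob)
    #   pointers_by_worker = grid.search("#mnist", "#features")
    # which returns {"alice": [...], "bob": [...]} for workers holding matches.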
def serve_model(
self,
model,
id: str,
mpc: bool = False,
allow_remote_inference: bool = False,
allow_download: bool = False,
n_replica: int = 1,
):
""" Choose some node(s) on grid network to host a unencrypted / encrypted model.
Args:
model: Model to be hosted.
id: Model's ID.
mpc: Boolean flag to host a plain text / encrypted model
allow_remote_inference: Allow to run inference remotely.
allow_download: Allow to copy the model and run it locally.
n_replica: Number of copies distributed through grid network.
Raises:
RuntimeError: If grid network doesn't have enough nodes to replicate the model.
NotImplementedError: If workers used by grid network aren't grid nodes.
"""
# If workers used by grid network aren't grid nodes.
if not self._check_node_type(self.workers, NodeClient):
raise NotImplementedError
if n_replica > len(self.workers):
raise RuntimeError("Not enough nodes!")
else:
nodes = random.sample(self.workers, n_replica)
for i in range(len(nodes)):
if not mpc:
# Host plain-text model
nodes[i].serve_model(
model,
model_id=id,
allow_download=allow_download,
allow_remote_inference=allow_remote_inference,
)
else:
# Host encrypted model
self._host_encrypted_model(model)
def run_remote_inference(self, id: str, data: torch.Tensor, mpc: bool = False) -> torch.Tensor:
""" Search for a specific model registered on grid network, if found,
It will run inference.
Args:
id : Model's ID.
            data : Data used to run inference.
mpc: Boolean flag to run a plain text / encrypted model
Returns:
Tensor : Inference's result.
Raises:
NotImplementedError: If workers used by grid network aren't grid nodes.
RuntimeError: If model id not found.
"""
# If workers used by grid network aren't grid nodes.
if not self._check_node_type(self.workers, NodeClient):
raise NotImplementedError
if not mpc:
result = self._run_unencrypted_inference(id, data)
else:
result = self._run_encrypted_inference(id, data)
return result
def query_model_hosts(
self, id: str, mpc: bool = False
) -> Union["NodeClient", Tuple["NodeClient"]]:
""" Search for node host from a specific model registered on grid network, if found,
It will return the frist host/ set of hosts that contains the desired model.
Args:
id : Model's ID.
mpc : Boolean flag to search for a plain text / encrypted model
Returns:
workers : First worker that contains the desired model.
Raises:
NotImplementedError: If workers used by grid network aren't grid nodes.
RuntimeError: If model id not found.
"""
# If workers used by grid network aren't grid nodes.
if not self._check_node_type(self.workers, NodeClient):
raise NotImplementedError
# Search for non mpc models.
if not mpc:
for node in self.workers:
if id in node.models:
return node
else:
# Search for MPC models
return self._query_encrypted_model_hosts(id)
def _host_encrypted_model(self, model, n_shares: int = 4):
""" This method wiil choose some grid nodes at grid network to host an encrypted model.
Args:
model: Model to be hosted.
n_shares: number of workers used by MPC protocol.
Raise:
RuntimeError : If grid network doesn't have enough workers
to host an encrypted model or if model is not a plan.
"""
# Verify if this network have enough workers.
if n_shares > len(self.workers):
raise RuntimeError("Not enough nodes!")
elif n_shares < self.SMPC_HOST_CHUNK:
raise RuntimeError("Not enough shares to perform MPC operations!")
else:
# Select N workers in your set of workers.
nodes = random.sample(self.workers, n_shares)
# Model needs to be a plan
if isinstance(model, Plan):
host = nodes[0] # Host
mpc_nodes = nodes[1:-1] # Shares
crypto_provider = nodes[-1] # Crypto Provider
# SMPC Share
model.fix_precision().share(*mpc_nodes, crypto_provider=crypto_provider)
# Host model
p_model = model.send(host)
# Save a pointer reference to this model in database.
host.serve_model(
p_model,
model_id=model.id,
allow_download=False,
allow_remote_inference=False,
mpc=True,
)
# If model isn't a plan
else:
raise RuntimeError("Model needs to be a plan to be encrypted!")
def _query_encrypted_model_hosts(self, id: str) -> Tuple["NodeClient"]:
""" Search for an encrypted model and return its mpc nodes.
Args:
id: Model's ID.
Returns:
Tuple : Tuple structure containing Host, MPC Nodes and crypto provider.
Raises:
RuntimeError: If model id not found.
"""
host = self.query_model_hosts(id)
# If it's registered on grid nodes.
if host:
model = host.search(id)[0].get(deregister_ptr=False)
mpc_nodes = set()
crypto_provider = None
# Check every state used by this plan
for state_id in model.state.state_ids:
hook = host.hook
obj = hook.local_worker.object_store.get_obj(state_id)
# Decrease in Tensor Hierarchy.
# (we want be a AdditiveSharingTensor to recover workers/crypto_provider addresses)
while not isinstance(obj, AdditiveSharingTensor):
obj = obj.child
# Get a list of mpc nodes.
nodes = map(lambda x: hook.local_worker._known_workers.get(x), obj.child.keys())
mpc_nodes.update(set(nodes))
if obj.crypto_provider:
crypto_provider = obj.crypto_provider
return (host, mpc_nodes, crypto_provider)
else:
raise RuntimeError("Model ID not found!")
def _run_unencrypted_inference(self, id: str, data) -> torch.Tensor:
""" Search for a plain-text model registered on grid network, if found,
It will run inference.
Args:
id : Model's ID.
            data : Data used to run inference.
Returns:
Tensor : Inference's result.
Raises:
RuntimeError: If model id not found.
"""
node = self.query_model_hosts(id)
if node:
response = node.run_remote_inference(model_id=id, data=data)
return torch.tensor(response)
else:
raise RuntimeError("Model not found on Grid Network!")
def _run_encrypted_inference(self, id: str, data) -> torch.Tensor:
""" Search for an encrypted model and perform inference.
Args:
            id: Model's ID.
            data: Dataset to be shared/inferred.
            copy: Boolean flag to perform encrypted inference without losing the plan.
Returns:
Tensor: Inference's result.
Raises:
RuntimeError: If model id not found.
"""
host, mpc_nodes, crypto_provider = self._query_encrypted_model_hosts(id)
# Share your dataset to same SMPC Workers
shared_data = data.fix_precision().share(*mpc_nodes, crypto_provider=crypto_provider)
# Perform Inference
fetched_plan = host.hook.local_worker.fetch_plan(id, host, copy=True)
return fetched_plan(shared_data).get().float_prec()
| 37.626923 | 99 | 0.58295 |
4a1afba6eb93a9791748d83c20ca870bda2e06bb
| 10,926 |
py
|
Python
|
tests/test_checker/test_noqa.py
|
lensvol/wemake-python-styleguide
|
aec33f0855c6a6f0d853de37b958b9b33c4428d0
|
[
"MIT"
] | null | null | null |
tests/test_checker/test_noqa.py
|
lensvol/wemake-python-styleguide
|
aec33f0855c6a6f0d853de37b958b9b33c4428d0
|
[
"MIT"
] | 15 |
2020-02-22T11:09:46.000Z
|
2020-02-27T16:36:54.000Z
|
tests/test_checker/test_noqa.py
|
lensvol/wemake-python-styleguide
|
aec33f0855c6a6f0d853de37b958b9b33c4428d0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Integration tests definition.
These are integration tests for several things:
1. that violation is active and enabled
2. that violation is raised for the bad code
3. that line number where violation is raised is correct
4. that `noqa` works
Docs: https://wemake-python-stylegui.de/en/latest/pages/api/contributing.html
"""
import re
import subprocess
import types
from collections import Counter
import pytest
from wemake_python_styleguide.compat.constants import PY38
#: Used to find violations' codes in output.
ERROR_PATTERN = re.compile(r'(WPS\d{3})')
#: List of ignored violations that we do not cover with `noqa` comments.
IGNORED_VIOLATIONS = (
'WPS201', # it is a module level violation
'WPS202', # since our test case is complex, that's fine
'WPS203', # it is a module level violation
'WPS204', # our tests have a lot of overused expressions
'WPS226', # we have a lot of ugly strings inside
'WPS400', # it is a module level violation
'WPS402', # we obviously use a lot of `noqa` comments
)
#: Number and count of violations that would be raised.
VERSION_SPECIFIC = types.MappingProxyType({
'WPS216': 1,
'WPS224': 1,
'WPS307': 1,
'WPS332': 0, # TODO: add test case, only works for `>= python3.8`
'WPS416': int(not PY38), # only works for `< python3.8`
'WPS451': int(PY38), # only works for `>= python3.8`
'WPS602': 2,
})
#: Number and count of violations that would be raised.
SHOULD_BE_RAISED = types.MappingProxyType({
'WPS000': 0, # logically unacceptable.
'WPS100': 0, # logically unacceptable.
'WPS101': 0, # logically unacceptable.
'WPS102': 0, # logically unacceptable.
'WPS110': 3,
'WPS111': 1,
'WPS112': 1,
'WPS113': 1,
'WPS114': 1,
'WPS115': 1,
'WPS116': 1,
'WPS117': 1,
'WPS118': 1,
'WPS119': 1,
'WPS120': 1,
'WPS121': 1,
'WPS122': 1,
'WPS123': 1,
'WPS200': 0, # logically unacceptable.
'WPS201': 0, # defined in ignored violations.
'WPS202': 0, # defined in ignored violations.
'WPS203': 0, # defined in ignored violations.
'WPS204': 0, # defined in ignored violations.
'WPS210': 1,
'WPS211': 1,
'WPS212': 1,
'WPS213': 1,
'WPS214': 1,
'WPS215': 1,
'WPS216': 0, # defined in version specific table.
'WPS217': 1,
'WPS218': 1,
'WPS219': 1,
'WPS220': 1,
'WPS221': 2,
'WPS222': 1,
'WPS223': 1,
'WPS224': 0, # defined in version specific table.
'WPS225': 1,
'WPS226': 0, # defined in ignored violations.
'WPS227': 1,
'WPS228': 1,
'WPS229': 1,
'WPS230': 1,
'WPS231': 1,
'WPS232': 0, # logically unacceptable.
'WPS233': 1,
'WPS234': 1,
'WPS300': 1,
'WPS301': 1,
'WPS302': 1,
'WPS303': 1,
'WPS304': 1,
'WPS305': 1,
'WPS306': 2,
'WPS307': 0, # defined in version specific table.
'WPS308': 1,
'WPS309': 1,
'WPS310': 4,
'WPS311': 1,
'WPS312': 1,
'WPS313': 1,
'WPS314': 1,
'WPS315': 1,
'WPS316': 1,
'WPS317': 1,
'WPS318': 3,
'WPS319': 2,
'WPS320': 2,
'WPS321': 1,
'WPS322': 1,
'WPS323': 1,
'WPS324': 1,
'WPS325': 1,
'WPS326': 1,
'WPS327': 1,
'WPS328': 2,
'WPS329': 1,
'WPS330': 1,
'WPS331': 1,
'WPS332': 0, # defined in version specific table.
'WPS333': 1,
'WPS334': 1,
'WPS335': 1,
'WPS336': 1,
'WPS337': 1,
'WPS338': 1,
'WPS339': 1,
'WPS340': 1,
'WPS341': 1,
'WPS342': 1,
'WPS343': 1,
'WPS344': 1,
'WPS345': 1,
'WPS346': 1,
'WPS347': 1,
'WPS348': 1,
'WPS349': 1,
'WPS350': 1,
'WPS351': 1,
'WPS352': 1,
'WPS353': 1,
'WPS354': 1,
'WPS355': 1,
'WPS356': 1,
'WPS400': 0, # defined in ignored violations.
'WPS401': 0, # logically unacceptable.
'WPS402': 0, # defined in ignored violations.
'WPS403': 0, # logically unacceptable.
'WPS404': 1,
'WPS405': 1,
'WPS406': 1,
'WPS407': 1,
'WPS408': 1,
'WPS409': 1,
'WPS410': 1,
'WPS411': 0, # logically unacceptable.
'WPS412': 0, # logically unacceptable.
'WPS413': 1,
'WPS414': 1,
'WPS415': 1,
'WPS416': 0, # defined in version specific table.
'WPS417': 1,
'WPS418': 1,
'WPS419': 1,
'WPS420': 2,
'WPS421': 1,
'WPS422': 1,
'WPS423': 1,
'WPS424': 1,
'WPS425': 1,
'WPS426': 1,
'WPS427': 1,
'WPS428': 2,
'WPS429': 1,
'WPS430': 1,
'WPS431': 2,
'WPS432': 2,
'WPS433': 1,
'WPS434': 1,
'WPS435': 1,
'WPS436': 1,
'WPS437': 1,
'WPS438': 4,
'WPS439': 1,
'WPS440': 1,
'WPS441': 1,
'WPS442': 2,
'WPS443': 1,
'WPS444': 1,
'WPS445': 1,
'WPS446': 1,
'WPS447': 1,
'WPS448': 1,
'WPS449': 1,
'WPS450': 1,
'WPS451': 0, # defined in version specific table.
'WPS500': 1,
'WPS501': 1,
'WPS502': 2,
'WPS503': 1,
'WPS504': 1,
'WPS505': 1,
'WPS506': 1,
'WPS507': 1,
'WPS508': 1,
'WPS509': 1,
'WPS510': 1,
'WPS511': 1,
'WPS512': 1,
'WPS513': 1,
'WPS514': 1,
'WPS515': 1,
'WPS516': 1,
'WPS517': 2,
'WPS518': 1,
'WPS519': 1,
'WPS520': 1,
'WPS521': 1,
'WPS522': 1,
'WPS523': 1,
'WPS524': 1,
'WPS525': 2,
'WPS526': 1,
'WPS527': 1,
'WPS528': 1,
'WPS529': 1,
'WPS530': 1,
'WPS600': 1,
'WPS601': 1,
'WPS602': 0, # defined in version specific table.
'WPS603': 1,
'WPS604': 2,
'WPS605': 1,
'WPS606': 1,
'WPS607': 1,
'WPS608': 1,
'WPS609': 1,
'WPS610': 1,
'WPS611': 1,
'WPS612': 1,
'WPS613': 1,
})
# Violations which may be tweaked by `i_control_code` option:
SHOULD_BE_RAISED_NO_CONTROL = types.MappingProxyType({
'WPS113': 0,
'WPS412': 0,
'WPS413': 0,
})
def _assert_errors_count_in_output(
output,
errors,
all_violations,
total=True,
):
found_errors = Counter(
(match.group(0) for match in ERROR_PATTERN.finditer(output)),
)
if total:
for violation in all_violations:
key = 'WPS{0}'.format(str(violation.code).zfill(3))
assert key in errors, 'Unlisted #noqa violation'
for found_error, found_count in found_errors.items():
assert found_error in errors, 'Violation without a #noqa count'
assert found_count == errors.get(found_error), found_error
assert set(
filter(lambda key: errors[key] != 0, errors),
) - found_errors.keys() == set()
def test_codes(all_violations):
"""Ensures that all violations are listed."""
assert len(SHOULD_BE_RAISED) == len(all_violations)
@pytest.mark.parametrize(('filename', 'violations', 'total'), [
('noqa.py', SHOULD_BE_RAISED, True),
pytest.param(
'noqa_pre38.py',
VERSION_SPECIFIC,
0,
marks=pytest.mark.skipif(PY38, reason='ast changes on 3.8'),
),
pytest.param(
'noqa38.py',
VERSION_SPECIFIC,
0,
marks=pytest.mark.skipif(not PY38, reason='ast changes on 3.8'),
),
])
def test_noqa_fixture_disabled(
absolute_path,
all_violations,
filename,
violations,
total,
):
"""End-to-End test to check that all violations are present."""
process = subprocess.Popen(
[
'flake8',
'--ignore',
','.join(IGNORED_VIOLATIONS),
'--disable-noqa',
'--isolated',
'--select',
'WPS',
absolute_path('fixtures', 'noqa', filename),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf8',
)
stdout, _ = process.communicate()
_assert_errors_count_in_output(stdout, violations, all_violations, total)
def test_noqa_fixture_disabled_no_control(
absolute_path,
all_controlled_violations,
):
"""End-to-End test to check rules controlled by `i_control_code` option."""
process = subprocess.Popen(
[
'flake8',
'--i-dont-control-code',
'--disable-noqa',
'--isolated',
'--select',
'WPS',
absolute_path('fixtures', 'noqa', 'noqa_controlled.py'),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf8',
)
stdout, _ = process.communicate()
_assert_errors_count_in_output(
stdout,
SHOULD_BE_RAISED_NO_CONTROL,
all_controlled_violations,
)
assert len(SHOULD_BE_RAISED_NO_CONTROL) == len(all_controlled_violations)
def test_noqa_fixture(absolute_path):
"""End-to-End test to check that `noqa` works."""
process = subprocess.Popen(
[
'flake8',
'--ignore',
','.join(IGNORED_VIOLATIONS),
'--isolated',
'--select',
'WPS',
absolute_path('fixtures', 'noqa', 'noqa.py'),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf8',
)
stdout, _ = process.communicate()
assert stdout.count('WPS') == 0
def test_noqa_fixture_without_ignore(absolute_path):
"""End-to-End test to check that `noqa` works without ignores."""
process = subprocess.Popen(
[
'flake8',
'--isolated',
'--select',
'WPS',
absolute_path('fixtures', 'noqa', 'noqa.py'),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf8',
)
stdout, _ = process.communicate()
for violation in IGNORED_VIOLATIONS:
assert stdout.count(violation) > 0
def test_noqa_fixture_diff(absolute_path, all_violations):
"""Ensures that our linter works in ``diff`` mode."""
process = subprocess.Popen(
[
'diff',
'-uN', # is required to ignore missing files
'missing_file', # is required to transform file to diff
absolute_path('fixtures', 'noqa', 'noqa.py'),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf8',
)
output = subprocess.check_output(
[
'flake8',
'--ignore',
','.join(IGNORED_VIOLATIONS),
'--disable-noqa',
'--isolated',
'--diff', # is required to test diffs! ;)
'--exit-zero', # to allow failures
],
stdin=process.stdout,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding='utf8',
)
process.communicate()
_assert_errors_count_in_output(output, SHOULD_BE_RAISED, all_violations)
| 24.013187 | 79 | 0.557295 |
4a1afc1740039135e924ff75b23e5ff08fc748ff
| 14,018 |
py
|
Python
|
crop_engine/nlde/operators/polybindjoin.py
|
Lars-H/slurp
|
0c7c8a5ca62145bedaff5791d6f54337674da2ea
|
[
"MIT"
] | 1 |
2021-06-29T07:59:50.000Z
|
2021-06-29T07:59:50.000Z
|
crop_engine/nlde/operators/polybindjoin.py
|
Lars-H/slurp
|
0c7c8a5ca62145bedaff5791d6f54337674da2ea
|
[
"MIT"
] | null | null | null |
crop_engine/nlde/operators/polybindjoin.py
|
Lars-H/slurp
|
0c7c8a5ca62145bedaff5791d6f54337674da2ea
|
[
"MIT"
] | null | null | null |
"""
Created on Sep 4, 2020
Physical operator that implements a JOIN.
The intermediate results are stored in queues and processed incrementally.
@author: Lars Heling
"""
from multiprocessing import Queue, Value, Process
from Queue import Empty
import math
from operatorstructures import Tuple, Record, RJTTail
from time import time
from random import randint
from nlde.util.misc import compatible_solutions
from nlde.operators.independent_operator import IndependentOperator
import logging
logger = logging.getLogger("nlde_debug")
class Poly_Bind_Join(object):
def __init__(self, id_operator, joinvars, eddies, eddy=None, left_card=-1):
self.left_table = dict()
self.right_table = dict()
self.id_operator = id_operator
self.vars = joinvars
self.eof = Tuple("EOF", 0, 0, set(), self.id_operator)
self.eddies = eddies
if eddy:
self.eddy = eddy
else:
self.eddy = randint(1, self.eddies)
self.left = None
self.right = None
self.qresults = None
self.sources = None
self.probing = Value('i', 1)
self.independent_inputs = 1
self.results_per_source = {}
self.left_est_card = left_card
self.produced_tuples = 0
# Config
self.__type2limit = {
"tpf" : 1,
"brtpf" : 30,
"sparql" : 50
}
def __str__(self):
return str("Poly Bind Join")
@staticmethod
def symmetric():
return False
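    # Note on the ready/done bit vectors used below (illustrative): each
    # operator owns one bit, so an operator with id_operator = 3 marks a
    # tuple as done via `done | pow(2, 3)`, e.g. 0b00001 | 0b01000 = 0b01001.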
def execute(self, inputs, out):
# Executes the Xnjoin.
self.left = inputs[0]
self.right = inputs[1]
self.qresults = out
self.limits = {}
self.reservoir = {}
self.tables = {}
        # Stats for deciding switch
tuples_left_cnt = 0
right_requests = int(math.ceil(self.right.query.cardinality / 100.0))
switch_left_count = right_requests - self.left_est_card
relevant_sources = self.right.query.sources.keys()
for ldf_source in relevant_sources:
self.limits[ldf_source] = self.__type2limit.get(ldf_source.split("@")[0], 1)
self.reservoir[ldf_source] = list()
self.tables[ldf_source] = {}
self.results_per_source[ldf_source] = 0
# Get tuples from queue.
tuple1 = self.left.get(True)
tuples_left_cnt +=1
switch_mode = False
#
# Get the tuples from the queues.
while not(tuple1.data == "EOF"):
for ldf_server in [k for k, v in sorted(self.results_per_source.items(), key=lambda item: item[1],
reverse=True)]:
self.stage1(tuple1, self.right, ldf_server)
# Check whether to switch:
if tuples_left_cnt > switch_left_count and tuples_left_cnt > self.left_est_card:
switch_mode = True
break
tuple1 = self.left.get(True)
tuples_left_cnt += 1
if switch_mode:
logger.debug("NLJ: Switching Mode to Hash Join")
logger.debug("Left tuples expected : {}; received: {}; switch count: {}; right_plan request remaining: {}".
format(self.left_est_card, tuples_left_cnt, switch_left_count, right_requests))
self.hash_join_mode()
else:
for ldf_server in relevant_sources:
self.stage1(tuple1, self.right, ldf_server)
# Add EOF to queue.
#self.probing.value = 1
tuple1.done = tuple1.done | pow(2, self.id_operator)
tuple1.ready = self.right.sources_desc[self.right.sources.keys()[0]] | tuple1.ready
tuple1.sources = set(tuple1.sources) | set([self.right.sources.keys()[0]])
tuple1.from_operator = self.id_operator
tuple1.tuples_produced.update({self.id_operator : self.produced_tuples})
        self.qresults[self.eddy].put(tuple1)  # forward EOF directly; it is not a produced result tuple
self.probing.value = 0
# Stage 1: While one of the sources is sending data.
def stage1(self, tuple1, right, ldf_server):
if tuple1.data != "EOF" and tuple1 != "EOF":
# Check if the data is already in its sources tuple table
rtuple = self.probe_table_of_source(tuple1, right, ldf_server, self.tables[ldf_server])
if rtuple:
self.reservoir[ldf_server].append(rtuple)
if len(self.reservoir[ldf_server]) >= self.limits[ldf_server]:
self.probe_tuples_from_source(self.reservoir[ldf_server], right, ldf_server, self.tables[ldf_server])
                self.reservoir[ldf_server] = list()
elif len(self.reservoir[ldf_server]) > 0:
resevoir = list()
for res_tuple in self.reservoir[ldf_server]:
rtuple = self.probe_table_of_source(res_tuple, right, ldf_server, self.tables[ldf_server])
if rtuple:
resevoir.append(rtuple)
self.probe_tuples_from_source(resevoir, right, ldf_server, self.tables[ldf_server])
def to_queue(self, res, source):
self.produced_tuples += 1
self.results_per_source[source] = self.results_per_source.get(source, 0) + 1
self.qresults[self.eddy].put(res)
def probe_table_of_source(self, rtuple, right, ldf_server, tuple_rjttable):
# Get the value(s) of the operator variable(s) in the tuple.
resource = ''
for var in self.vars:
resource = resource + str(rtuple.data[var])
probe_ts = time()
# If the resource is in table, produce results.
if resource in tuple_rjttable.keys():
tuple_rjttable.get(resource).setRJTProbeTS(probe_ts)
list_records = tuple_rjttable[resource].records
# For each match, produce the results (solution mappings).
for record in list_records:
res = {}
if record.tuple.data == "EOF":
break
# Merge solution mappings.
res.update(record.tuple.data)
res.update(rtuple.data)
# Update ready and done vectors.
ready = record.tuple.ready | rtuple.ready
done = record.tuple.done | rtuple.done | pow(2, self.id_operator)
sources = list(set(record.tuple.sources) | set(rtuple.sources))
# Create solution mapping.
res = Tuple(res, ready, done, sources, self.id_operator)
# Send solution mapping to eddy operators.
self.to_queue(res, ldf_server)
return None
else:
return rtuple
def probe_tuples_from_source(self, tuple_list, right, ldf_server, tuple_rjttable):
probe_ts = time()
if len(tuple_list) > 0:
instances = []
for rtuple in tuple_list:
instance = {}
for v in self.vars:
instance.update({v: rtuple.data[v]})
instances.append(instance)
# Contact the sources.
qright = Queue()
right.execute(self.vars, instances, qright, ldf_server=ldf_server)
# Get the tuples from right_plan queue.
tuple2 = qright.get(True)
self.sources = tuple2.sources
# Empty result set.
if (tuple2 == "EOF") or (tuple2.data == "EOF"):
# For all tested tuples add the tail to the records
for tested_tuple in tuple_list:
resource = ''
for var in self.vars:
resource = resource + str(tested_tuple.data[var])
record = Record(tuple2, probe_ts, time(), float("inf"))
tail = RJTTail(record, float("inf"))
tuple_rjttable[resource] = tail
# Non-empty result set.
while (tuple2 != "EOF") and (tuple2.data != "EOF"):
rtuple_added = False
for rtuple in tuple_list:
if not compatible_solutions(rtuple.data, tuple2.data):
continue
# Create solution mapping.
data = {}
data.update(tuple2.data)
data.update(rtuple.data)
# Update ready and done vectors of solution mapping.
ready = tuple2.ready | rtuple.ready
done = tuple2.done | rtuple.done | pow(2, self.id_operator)
sources = list(set(tuple2.sources) | set(rtuple.sources))
# Create tuple.
res = Tuple(data, ready, done, sources, self.id_operator)
# Introduce the results of contacting the sources in the corresponding table.
record = Record(tuple2, probe_ts, time(), float("inf"))
resource = ''
for var in self.vars:
resource = resource + str(rtuple.data[var])
# Send tuple to eddy operators.
self.to_queue(res, ldf_server)
if resource in tuple_rjttable.keys() and not rtuple_added:
tuple_rjttable.get(resource).updateRecords(record)
tuple_rjttable.get(resource).setRJTProbeTS(probe_ts)
else:
tail = RJTTail(record, float("inf"))
tuple_rjttable[resource] = tail
rtuple_added = True
# Get next solution.
tuple2 = qright.get(True)
# Close queue for this sources.
qright.close()
def hash_join_mode(self):
self.p_list = []
# Create Independent Operator for the right_plan side
self.right_operator = IndependentOperator(self.right.sources.keys()[0], self.right.server, self.right.query,
self.right.sources_desc, eofs_desc=self.right.sources_desc)
# Create Queue for it
self.right = Queue()
p2 = Process(target=self.right_operator.execute,
args=(None, self.right, None, self.p_list,))
# Execute Operator
p2.start()
self.p_list.append(p2.pid)
while True:
self.probing.value = 1
# Try to get and process tuple from left_plan queue.
try:
tuple1 = self.left.get(False)
self.stage1_hash_join(tuple1, self.left_table, self.right_table)
except Empty:
# Empty: in tuple1 = self.left_plan.get(False), when the queue is empty.
self.probing.value = 0
pass
except TypeError:
# TypeError: in resource = resource + tuple[var], when the tuple is "EOF".
pass
except IOError:
# IOError: when a tuple is received, but the alarm is fired.
pass
# Try to get and process tuple from right_plan queue.
try:
tuple2 = self.right.get(False)
self.stage1_hash_join(tuple2, self.right_table, self.left_table)
except Empty:
# Empty: in tuple2 = self.right_plan.get(False), when the queue is empty.
self.probing.value = 0
pass
except TypeError:
# TypeError: in resource = resource + tuple[var], when the tuple is "EOF".
pass
except IOError:
# IOError: when a tuple is received, but the alarm is fired.
pass
# Stage 1: While one of the sources is sending data.
def stage1_hash_join(self, tuple1, tuple_rjttable, other_rjttable):
# Get the value(s) of the operator variable(s) in the tuple.
resource = ''
if tuple1.data != "EOF":
for var in self.vars:
try:
resource = resource + str(tuple1.data[var])
except Exception as e:
raise e
else:
resource = "EOF"
# Probe the tuple against its RJT table.
probe_ts = self.probe_hash_table(tuple1, resource, tuple_rjttable)
# Create the records.
record = Record(tuple1, probe_ts, time(), float("inf"))
# Insert the record in the corresponding RJT table.
if resource in other_rjttable:
other_rjttable.get(resource).updateRecords(record)
other_rjttable.get(resource).setRJTProbeTS(probe_ts)
else:
tail = RJTTail(record, probe_ts)
other_rjttable[resource] = tail
def probe_hash_table(self, tuple1, resource, rjttable):
# Probe a tuple against its corresponding table.
probe_ts = time()
# If the resource is in the table, produce results.
if resource in rjttable:
rjttable.get(resource).setRJTProbeTS(probe_ts)
list_records = rjttable[resource].records
# For each matching solution mapping, generate an answer.
for record in list_records:
if resource != "EOF":
# Merge solution mappings.
data = {}
data.update(record.tuple.data)
data.update(tuple1.data)
else:
data = "EOF"
# Update ready and done vectors.
ready = record.tuple.ready | tuple1.ready
done = record.tuple.done | tuple1.done | pow(2, self.id_operator)
sources = list(set(record.tuple.sources) | set(tuple1.sources))
# Create tuple.
res = Tuple(data, ready, done, sources, self.id_operator)
# Send tuple to eddy operators.
self.qresults[self.eddy].put(res)
return probe_ts
| 36.316062 | 119 | 0.561278 |
4a1afd48f810330751f66a32b111580451ef4b7a
| 12,886 |
py
|
Python
|
tests/objects/fibers_test.py
|
3d-pli/fastpli
|
fe90ac53a7e78d122696bebb4816f4cb953cdb72
|
[
"MIT"
] | 13 |
2020-03-21T10:40:36.000Z
|
2022-03-20T17:27:56.000Z
|
tests/objects/fibers_test.py
|
3d-pli/fastpli
|
fe90ac53a7e78d122696bebb4816f4cb953cdb72
|
[
"MIT"
] | 11 |
2021-01-30T07:21:52.000Z
|
2021-03-16T15:24:41.000Z
|
tests/objects/fibers_test.py
|
3d-pli/fastpli
|
fe90ac53a7e78d122696bebb4816f4cb953cdb72
|
[
"MIT"
] | 6 |
2020-08-27T07:19:30.000Z
|
2021-07-20T08:49:11.000Z
|
import unittest
import numpy as np
import fastpli.objects
import fastpli.tools
class MainTest(unittest.TestCase):
def setUp(self):
self.fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [1, 1, 1, 2]])
self.fiber_bundle = fastpli.objects.FiberBundle(self.fiber.copy())
self.fiber_bundles = fastpli.objects.FiberBundles(self.fiber.copy())
def test_init(self):
fastpli.objects.FiberBundle()
fastpli.objects.FiberBundles()
a = np.array([0, 0, 0, 0])
_ = fastpli.objects.Fiber([[0, 0, 0, 1], [0, 0, 1, 2]])
f = fastpli.objects.Fiber(a)
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
f = fastpli.objects.Fiber(f)
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
fb = fastpli.objects.FiberBundle([a])
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
fb = fastpli.objects.FiberBundle(f)
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
fb = fastpli.objects.FiberBundle(fb)
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
fbs = fastpli.objects.FiberBundles([[a]])
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fbs = fastpli.objects.FiberBundles(f)
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fbs = fastpli.objects.FiberBundles([f, f])
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fbs = fastpli.objects.FiberBundles(fbs)
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
fb = fastpli.objects.FiberBundle([[[0, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]]])
for f in fb:
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
self.assertTrue(isinstance(f._data, np.ndarray))
fbs = fastpli.objects.FiberBundles([[[[0, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]]],
[[[0, 1, 2, 3], [1, 2, 3, 4],
[2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4],
[2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4],
[2, 4, 5, 5]]]])
for fb in fbs:
self.assertTrue(isinstance(fb, fastpli.objects.FiberBundle))
for f in fb:
self.assertTrue(isinstance(f, fastpli.objects.Fiber))
self.assertTrue(isinstance(f._data, np.ndarray))
def test_type(self):
self.assertTrue(isinstance(self.fiber[:], np.ndarray))
self.assertTrue(self.fiber[:].dtype == float)
self.assertTrue(
fastpli.objects.Fiber([[1, 1, 1, 1]], np.float32).dtype ==
np.float32)
def test_layers(self):
fastpli.objects.FiberBundle(self.fiber_bundle,
[(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'), (1.0, 0.004, 1, 'r')])
fastpli.objects.FiberBundles(self.fiber_bundles,
[[(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'),
(1.0, 0.004, 1, 'r')]])
fb = fastpli.objects.FiberBundle([[[0, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1],
[2, 2, 2, 1]]])
fb = fastpli.objects.FiberBundle(fb, [(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'),
(1.0, 0.004, 1, 'r')])
fbs = [[[[0, 0, 0, 1], [1, 1, 1, 1], [2, 2, 2, 1]],
[[1, 0, 0, 1], [1, 1, 1, 1], [2, 2, 2, 1]]],
[[[0, 1, 2, 3], [1, 2, 3, 4], [2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4], [2, 4, 5, 5]],
[[1, 1, 2, 3], [1, 2, 3, 4], [2, 4, 5, 5]]]]
fbs = fastpli.objects.FiberBundles(fbs,
[[(0.333, -0.004, 10, 'p'),
(0.666, 0, 5, 'b'),
(1.0, 0.004, 1, 'r')]] * len(fbs))
def test_resize(self):
fiber = self.fiber.scale(10)
self.assertTrue(np.array_equal(fiber[:], self.fiber[:] * 10))
fb = self.fiber_bundle.scale(10)
for f in fb:
self.assertTrue(np.array_equal(f[:], self.fiber[:] * 10))
fbs = self.fiber_bundles.scale(10)
for fb in fbs:
for f in fb:
self.assertTrue(np.array_equal(f[:], self.fiber[:] * 10))
fiber = self.fiber.scale(10, mode='points')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2] * 10))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
fiber = self.fiber.scale(10, mode='radii')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2]))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1] * 10))
def test_rotation(self):
fiber = self.fiber.rotate(fastpli.tools.rotation.x(0))
self.assertTrue(np.array_equal(self.fiber[:], fiber[:]))
fiber = self.fiber.rotate(fastpli.tools.rotation.x(np.deg2rad(90)))
self.assertTrue(
np.allclose(fiber[:], np.array([[0, 0, 0, 1], [1, -1, 1, 2]])))
fiber = self.fiber.rotate(fastpli.tools.rotation.x(np.deg2rad(90)),
[1, 1, 1])
self.assertTrue(
np.allclose(fiber[:], np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
fiber_bundle = self.fiber_bundle.rotate(
fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(len(fiber_bundle) == len(self.fiber_bundle))
for f in fiber_bundle:
self.assertTrue(
np.allclose(f[:], np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
for fb in self.fiber_bundles:
for f in fb:
fiber = f.rotate(fastpli.tools.rotation.x(np.deg2rad(90)),
[1, 1, 1])
self.assertTrue(
np.allclose(fiber[:], np.array([[0, 2, 0, 1], [1, 1, 1,
2]])))
def test_translate(self):
fiber = self.fiber.translate([1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
fiber_bundle = self.fiber_bundle.translate([1, 1, 1])
self.assertTrue(len(fiber_bundle) == len(self.fiber_bundle))
for f in fiber_bundle:
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
for fb in self.fiber_bundles:
for f in fb:
fiber = f.translate([1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
def test_apply(self):
# Fiber
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
fiber_ = fiber.apply(lambda x: x + 1)
self.assertTrue(isinstance(fiber_, fastpli.objects.Fiber))
self.assertTrue(np.array_equal(fiber[:] + 1, fiber_[:]))
fiber_ = fiber.apply_to_points(lambda x: x + 1)
self.assertTrue(isinstance(fiber_, fastpli.objects.Fiber))
self.assertTrue(np.array_equal(fiber[:, :-1] + 1, fiber_[:, :-1]))
self.assertTrue(np.array_equal(fiber[:, -1], fiber_[:, -1]))
fiber_ = fiber.apply_to_radii(lambda x: x + 1)
self.assertTrue(isinstance(fiber_, fastpli.objects.Fiber))
self.assertTrue(np.array_equal(fiber[:, :-1], fiber_[:, :-1]))
self.assertTrue(np.array_equal(fiber[:, -1] + 1, fiber_[:, -1]))
# FiberBundle
fb = fastpli.objects.FiberBundle([[0, 0, 0, 1], [1, 1, 1, 2]],
dtype=float)
fb_ = fb.apply(lambda x: x + 1)
self.assertTrue(isinstance(fb_, fastpli.objects.FiberBundle))
self.assertTrue(np.array_equal(fb[0][:] + 1, fb_[0][:]))
fb_ = fb.apply_to_points(lambda x: x + 1)
self.assertTrue(isinstance(fb_, fastpli.objects.FiberBundle))
self.assertTrue(np.array_equal(fb[0][:, :-1] + 1, fb_[0][:, :-1]))
self.assertTrue(np.array_equal(fb[0][:, -1], fb_[0][:, -1]))
fb_ = fb.apply_to_radii(lambda x: x + 1)
self.assertTrue(isinstance(fb_, fastpli.objects.FiberBundle))
self.assertTrue(np.array_equal(fb[0][:, :-1], fb_[0][:, :-1]))
self.assertTrue(np.array_equal(fb[0][:, -1] + 1, fb_[0][:, -1]))
# FiberBundles
fbs = fastpli.objects.FiberBundles([[[0, 0, 0, 1], [1, 1, 1, 2]]],
dtype=float)
fbs_ = fbs.apply(lambda x: x + 1)
self.assertTrue(isinstance(fbs_, fastpli.objects.FiberBundles))
self.assertTrue(np.array_equal(fbs[0][0][:] + 1, fbs_[0][0][:]))
fbs_ = fbs.apply_to_points(lambda x: x + 1)
self.assertTrue(isinstance(fbs_, fastpli.objects.FiberBundles))
self.assertTrue(
np.array_equal(fbs[0][0][::, :-1] + 1, fbs_[0][0][:, :-1]))
self.assertTrue(np.array_equal(fbs[0][0][:, -1], fbs_[0][0][:, -1]))
fbs_ = fbs.apply_to_radii(lambda x: x + 1)
self.assertTrue(isinstance(fbs_, fastpli.objects.FiberBundles))
self.assertTrue(np.array_equal(fbs[0][0][:, :-1], fbs_[0][0][:, :-1]))
self.assertTrue(np.array_equal(fbs[0][0][:, -1] + 1, fbs_[0][0][:, -1]))
def test_cut(self):
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
fibers = fiber.cut([[-10] * 3, [10] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2]])
fibers = fiber.cut([[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2],
[100, 100, 100, 2]])
fibers = fiber.cut([[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2],
[100, 100, 100, 2], [10, 10, 10, 2],
[0, 0, 0, 1]])
fibers = fiber.cut([[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
self.assertTrue(not np.array_equal(fibers[1][:], fiber[:]))
fiber_bundle = fastpli.objects.FiberBundle(fiber)
cut_fb = fiber_bundle.cut([[-5] * 3, [5] * 3])
fibers = cut_fb
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
self.assertTrue(not np.array_equal(fibers[1][:], fiber[:]))
fiber_bundles = fastpli.objects.FiberBundles(fiber)
cut_fbs = fiber_bundles.cut([[-5] * 3, [5] * 3])
fibers = cut_fbs[0]
self.assertTrue(len(cut_fbs) == 1)
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0][:], fiber[:]))
self.assertTrue(not np.array_equal(fibers[1][:], fiber[:]))
fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [10, 10, 10, 2]])
fibers = fiber.cut([[5] * 3, [6] * 3])
self.assertTrue(np.array_equal(fibers[0][:], fiber[:]))
if __name__ == '__main__':
unittest.main()
| 45.214035 | 80 | 0.489524 |
4a1afe6dc9e9e63e177b561ffba25552d61d05bc
| 190 |
py
|
Python
|
back/sales/serializers.py
|
Marthox1999/UV-Energy
|
8827c4ff1ffae574d7f13ec6d733b3eda68424f9
|
[
"MIT"
] | null | null | null |
back/sales/serializers.py
|
Marthox1999/UV-Energy
|
8827c4ff1ffae574d7f13ec6d733b3eda68424f9
|
[
"MIT"
] | 58 |
2020-02-22T20:46:15.000Z
|
2021-04-08T21:16:28.000Z
|
back/sales/serializers.py
|
MarthoxG/UV-Energy
|
8827c4ff1ffae574d7f13ec6d733b3eda68424f9
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from sales.models import Bill
class BillSerializers(serializers.ModelSerializer):
class Meta:
model = Bill
fields = '__all__'
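# Editor's note: illustrative sketch only, not part of the original module. It
# assumes a saved Bill instance and the standard DRF serializer API; the actual
# Bill fields come from the model, since `fields = '__all__'` exposes them all.
def _example_bill_serialization(bill):
    """Serialize an existing Bill and re-validate the resulting data (illustrative)."""
    data = BillSerializers(bill).data          # model instance -> primitive dict
    incoming = BillSerializers(data=data)      # primitive dict -> validation pass
    return data if incoming.is_valid() else incoming.errors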
| 15.833333 | 51 | 0.721053 |
4a1afe722010932db730f3b81c3a3f0b99f086fc
| 211,625 |
py
|
Python
|
library/Box2D/Box2D.py
|
foxik/pybox2d
|
bdfd1d1120338b9e31615df25c4e4400cf9a42a4
|
[
"Zlib"
] | null | null | null |
library/Box2D/Box2D.py
|
foxik/pybox2d
|
bdfd1d1120338b9e31615df25c4e4400cf9a42a4
|
[
"Zlib"
] | null | null | null |
library/Box2D/Box2D.py
|
foxik/pybox2d
|
bdfd1d1120338b9e31615df25c4e4400cf9a42a4
|
[
"Zlib"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _Box2D
else:
import _Box2D
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _Box2D.SWIG_PyInstanceMethod_New
_swig_new_static_method = _Box2D.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
def _dir_filter(self):
"""
Using introspection, mimic dir() by adding up all of the __dicts__
for the current class and all base classes (type(self).__mro__ returns
all of the classes that make it up)
Basically filters by:
__x__ OK
__x bad
_classname bad
"""
def check(s):
if s.startswith('__'):
if s.endswith('__'):
return True
else:
return False
else:
for typename in typenames:
if typename in s:
return False
return True
keys = sum([list(c.__dict__.keys()) for c in type(self).__mro__],[])
keys += list(self.__dict__.keys())
typenames = ["_%s" % c.__name__ for c in type(self).__mro__]
ret = [s for s in list(set(keys)) if check(s)]
ret.sort()
return ret
def _init_kwargs(self, **kwargs):
cls = self.__class__
for key, value in kwargs.items():
try:
getattr(cls, key)
except AttributeError:
raise AttributeError('Invalid keyword argument "%s" for %s' % (key, cls))
try:
setattr(self, key, value)
except Exception as ex:
raise ex.__class__('Failed on kwargs for %s.%s: %s' \
% (self.__class__.__name__, key, ex))
def _init_jointdef_kwargs(self, bodyA=None, bodyB=None, **kwargs):
if bodyA is not None or bodyB is not None:
# Make sure that bodyA and bodyB are defined before the rest
_init_kwargs(self, bodyA=bodyA, bodyB=bodyB)
_init_kwargs(self, **kwargs)
_repr_attrs = {'b2AABB': ['center', 'extents', 'lowerBound', 'perimeter', 'upperBound',
'valid', ],
'b2Body': ['active', 'angle', 'angularDamping', 'angularVelocity', 'awake',
'bullet', 'contacts', 'fixedRotation', 'fixtures',
'inertia', 'joints', 'linearDamping', 'linearVelocity',
'localCenter', 'mass', 'massData', 'position',
'sleepingAllowed', 'transform', 'type', 'userData',
'worldCenter', ],
'b2BodyDef': ['active', 'allowSleep', 'angle', 'angularDamping', 'angularVelocity',
'awake', 'bullet', 'fixedRotation', 'fixtures',
'inertiaScale', 'linearDamping', 'linearVelocity', 'position',
'shapeFixture', 'shapes', 'type', 'userData',
],
'b2BroadPhase': ['proxyCount', ],
'b2CircleShape': ['childCount', 'pos', 'radius', 'type', ],
'b2ClipVertex': ['id', 'v', ],
'b2Color': ['b', 'bytes', 'g', 'list', 'r',
],
'b2Contact': ['childIndexA', 'childIndexB', 'enabled', 'fixtureA', 'fixtureB',
'manifold', 'touching', 'worldManifold', ],
'b2ContactEdge': ['contact', 'other', ],
'b2ContactFeature': ['indexA', 'indexB', 'typeA', 'typeB', ],
'b2ContactID': ['cf', 'key', ],
'b2ContactImpulse': ['normalImpulses', 'tangentImpulses', ],
'b2ContactManager': ['allocator', 'broadPhase', 'contactCount', 'contactFilter', 'contactList',
'contactListener', ],
'b2ContactPoint': ['fixtureA', 'fixtureB', 'normal', 'position', 'state',
],
'b2DistanceInput': ['proxyA', 'proxyB', 'transformA', 'transformB', 'useRadii',
],
'b2DistanceJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'dampingRatio', 'frequency', 'length', 'type',
'userData', ],
'b2DistanceJointDef': ['anchorA', 'anchorB', 'bodyA', 'bodyB', 'collideConnected',
'dampingRatio', 'frequencyHz', 'length', 'localAnchorA',
'localAnchorB', 'type', 'userData', ],
'b2DistanceOutput': ['distance', 'iterations', 'pointA', 'pointB', ],
'b2DistanceProxy': ['m_buffer', 'shape', 'vertices', ],
'b2Draw': ['flags', ],
'b2DrawExtended': ['center', 'convertVertices', 'flags', 'flipX', 'flipY',
'offset', 'screenSize', 'zoom', ],
'b2EdgeShape': ['all_vertices', 'childCount', 'hasVertex0', 'hasVertex3', 'radius',
'type', 'vertex0', 'vertex1', 'vertex2',
'vertex3', 'vertexCount', 'vertices', ],
'b2Filter': ['categoryBits', 'groupIndex', 'maskBits', ],
'b2Fixture': ['body', 'density', 'filterData', 'friction', 'massData',
'restitution', 'sensor', 'shape', 'type',
'userData', ],
'b2FixtureDef': ['categoryBits', 'density', 'filter', 'friction', 'groupIndex',
'isSensor', 'maskBits', 'restitution', 'shape',
'userData', ],
'b2FixtureProxy': ['aabb', 'childIndex', 'fixture', 'proxyId', ],
'b2FrictionJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'maxForce', 'maxTorque', 'type', 'userData',
],
'b2FrictionJointDef': ['anchor', 'bodyA', 'bodyB', 'collideConnected', 'localAnchorA',
'localAnchorB', 'maxForce', 'maxTorque', 'type',
'userData', ],
'b2GearJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'ratio', 'type', 'userData', ],
'b2GearJointDef': ['bodyA', 'bodyB', 'collideConnected', 'joint1', 'joint2',
'ratio', 'type', 'userData', ],
'b2Jacobian': ['angularA', 'angularB', 'linearA', 'linearB', ],
'b2Joint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'type', 'userData', ],
'b2JointDef': ['bodyA', 'bodyB', 'collideConnected', 'type', 'userData',
],
'b2JointEdge': ['joint', 'other', ],
'b2WheelJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'maxMotorTorque', 'motorEnabled', 'motorSpeed', 'speed',
'springDampingRatio', 'springFrequencyHz', 'translation', 'type',
'userData', ],
'b2WheelJointDef': ['anchor', 'axis', 'bodyA', 'bodyB', 'collideConnected',
'dampingRatio', 'enableMotor', 'frequencyHz', 'localAnchorA',
'localAnchorB', 'localAxisA', 'maxMotorTorque', 'motorSpeed',
'type', 'userData', ],
'b2ChainShape': ['childCount', 'edges', 'radius', 'type', 'vertexCount',
'vertices', ],
'b2Manifold': ['localNormal', 'localPoint', 'pointCount', 'points', 'type_',
],
'b2ManifoldPoint': ['id', 'isNew', 'localPoint', 'normalImpulse', 'tangentImpulse',
],
'b2MassData': ['I', 'center', 'mass', ],
'b2Mat22': ['angle', 'col1', 'col2', 'inverse', ],
'b2Mat33': ['col1', 'col2', 'col3', ],
'b2MouseJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'dampingRatio', 'frequency', 'maxForce', 'target',
'type', 'userData', ],
'b2MouseJointDef': ['bodyA', 'bodyB', 'collideConnected', 'dampingRatio', 'frequencyHz',
'maxForce', 'target', 'type', 'userData',
],
'b2Pair': ['proxyIdA', 'proxyIdB', ],
'b2PolygonShape': ['box', 'centroid', 'childCount', 'normals', 'radius',
'type', 'valid', 'vertexCount', 'vertices',
],
'b2PrismaticJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'limitEnabled', 'limits', 'lowerLimit', 'maxMotorForce',
'motorEnabled', 'motorSpeed', 'speed', 'translation',
'type', 'upperLimit', 'userData', ],
'b2PrismaticJointDef': ['anchor', 'axis', 'bodyA', 'bodyB', 'collideConnected',
'enableLimit', 'enableMotor', 'localAnchorA', 'localAnchorB',
'localAxis1', 'lowerTranslation', 'maxMotorForce', 'motorSpeed',
'referenceAngle', 'type', 'upperTranslation', 'userData',
],
'b2PulleyJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'groundAnchorA', 'groundAnchorB', 'length1', 'length2',
'ratio', 'type', 'userData', ],
'b2PulleyJointDef': ['anchorA', 'anchorB', 'bodyA', 'bodyB', 'collideConnected',
'groundAnchorA', 'groundAnchorB', 'lengthA', 'lengthB',
'localAnchorA', 'localAnchorB', 'maxLengthA', 'maxLengthB',
'ratio', 'type', 'userData', ],
'b2RayCastInput': ['maxFraction', 'p1', 'p2', ],
'b2RayCastOutput': ['fraction', 'normal', ],
'b2RevoluteJoint': ['active', 'anchorA', 'anchorB', 'angle', 'bodyA',
'bodyB', 'limitEnabled', 'limits', 'lowerLimit',
'maxMotorTorque', 'motorEnabled', 'motorSpeed', 'speed',
'type', 'upperLimit', 'userData', ],
'b2RevoluteJointDef': ['anchor', 'bodyA', 'bodyB', 'collideConnected', 'enableLimit',
'enableMotor', 'localAnchorA', 'localAnchorB', 'lowerAngle',
'maxMotorTorque', 'motorSpeed', 'referenceAngle', 'type',
'upperAngle', 'userData', ],
'b2RopeJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'limitState', 'maxLength', 'type', 'userData',
],
'b2RopeJointDef': ['anchorA', 'anchorB', 'bodyA', 'bodyB', 'collideConnected',
'localAnchorA', 'localAnchorB', 'maxLength', 'type',
'userData', ],
'b2Shape': ['childCount', 'radius', 'type', ],
'b2Sweep': ['a', 'a0', 'alpha0', 'c', 'c0',
'localCenter', ],
'b2TOIInput': ['proxyA', 'proxyB', 'sweepA', 'sweepB', 'tMax',
],
'b2TOIOutput': ['state', 't', ],
'b2Transform': ['R', 'angle', 'position', ],
'b2Vec2': ['length', 'lengthSquared', 'skew', 'tuple', 'valid',
'x', 'y', ],
'b2Vec3': ['length', 'lengthSquared', 'tuple', 'valid', 'x',
'y', 'z', ],
'b2Version': ['major', 'minor', 'revision', ],
'b2WeldJoint': ['active', 'anchorA', 'anchorB', 'bodyA', 'bodyB',
'type', 'userData', ],
'b2WeldJointDef': ['anchor', 'bodyA', 'bodyB', 'collideConnected', 'localAnchorA',
'localAnchorB', 'referenceAngle', 'type', 'userData',
],
'b2World': ['autoClearForces', 'bodies', 'bodyCount', 'contactCount', 'contactFilter',
'contactListener', 'contactManager', 'contacts', 'continuousPhysics',
'destructionListener', 'gravity', 'jointCount', 'joints',
'locked', 'proxyCount', 'renderer', 'subStepping',
'warmStarting', ],
'b2WorldManifold': ['normal', 'points', ],
}
MAX_REPR_DEPTH = 4
MAX_REPR_STR_LEN = 250
MAX_REPR_SUB_LINES = 10
REPR_INDENT = 4
_repr_state = {}
def _format_repr(obj):
"""
Dynamically creates the object representation string for `obj`.
Attributes found in _repr_attrs[class_name] will be included.
"""
global _repr_state
if 'spaces' not in _repr_state:
_repr_state['spaces'] = 0
if 'depth' not in _repr_state:
_repr_state['depth'] = 1
else:
_repr_state['depth'] += 1
if _repr_state['depth'] > MAX_REPR_DEPTH:
_repr_state['depth'] -= 1
return '%s(max recursion depth hit)' % (' ' * _repr_state['spaces'])
class_line = '%s(' % (obj.__class__.__name__, )
orig_spaces = _repr_state['spaces']
ret = []
props = _repr_attrs.get(obj.__class__.__name__, [])
try:
prop_spacing = _repr_state['spaces'] + len(class_line.lstrip())
separator = '\n' + ' ' * prop_spacing
for prop in props:
_repr_state['spaces'] = len(prop) + 1
try:
s = repr(getattr(obj, prop))
except Exception as ex:
s = '(repr: %s)' % ex
lines = s.split('\n')
if len(lines) > MAX_REPR_SUB_LINES:
length_ = 0
for i, line_ in enumerate(lines[:MAX_REPR_SUB_LINES]):
length_ += len(line_)
if length_ > MAX_REPR_STR_LEN:
ending_delim = []
for j in s[::-1]:
if j in ')]}':
ending_delim.insert(0, j)
else:
break
ret[-1] = '%s... %s' % (ret[-1], ''.join(ending_delim))
break
if i == 0:
ret.append('%s=%s' % (prop, line_))
else:
ret.append(line_)
else:
ret.append('%s=%s' % (prop, lines[0].lstrip()))
if len(lines) > 1:
ret.extend(lines[1:])
ret[-1] += ','
finally:
_repr_state['depth'] -= 1
_repr_state['spaces'] = orig_spaces
    if 1 <= len(ret) <= 3:
# Closing parenthesis on same line
ret[-1] += ')'
return ''.join(ret)
else:
# Closing parenthesis on next line
ret.append(')')
return '%s%s' % (class_line, separator.join(ret))
__jointeq = _Box2D.__jointeq
__bodyeq = _Box2D.__bodyeq
__shapeeq = _Box2D.__shapeeq
__fixtureeq = _Box2D.__fixtureeq
__b2ComputeCentroid = _Box2D.__b2ComputeCentroid
b2CheckVertices = _Box2D.b2CheckVertices
b2CheckPolygon = _Box2D.b2CheckPolygon
RAND_LIMIT = _Box2D.RAND_LIMIT
b2Random = _Box2D.b2Random
b2_epsilon = 1.192092896e-07
class _indexable_generator(list):
def __init__(self, iter):
list.__init__(self)
self.iter=iter
self.__full=False
def __len__(self):
self.__fill_list__()
return super(_indexable_generator, self).__len__()
def __iter__(self):
for item in self.iter:
self.append(item)
yield item
self.__full=True
def __fill_list__(self):
for item in self.iter:
self.append(item)
self.__full=True
def __getitem__(self, i):
"""Support indexing positive/negative elements of the generator,
but no slices. If you want those, use list(generator)"""
if not self.__full:
if i < 0:
self.__fill_list__()
elif i >= list.__len__(self):
diff=i-list.__len__(self)+1
for j in range(diff):
value = next(self.iter)
self.append(value)
return super(_indexable_generator, self).__getitem__(i)
def _generator_from_linked_list(first):
if first:
one = first
while one:
yield one
one = one.next
def _list_from_linked_list(first):
if not first:
return []
one = first
lst = []
while one:
lst.append(one)
one = one.next
# linked lists are stored in reverse order from creation order
lst.reverse()
return lst
# Support using == on bodies, joints, and shapes
def b2ShapeCompare(a, b):
if not isinstance(a, b2Shape) or not isinstance(b, b2Shape):
return False
return __shapeeq(a, b)
def b2BodyCompare(a, b):
if not isinstance(a, b2Body) or not isinstance(b, b2Body):
return False
return __bodyeq(a, b)
def b2JointCompare(a, b):
if not isinstance(a, b2Joint) or not isinstance(b, b2Joint):
return False
return __jointeq(a, b)
def b2FixtureCompare(a, b):
if not isinstance(a, b2Fixture) or not isinstance(b, b2Fixture):
return False
return __fixtureeq(a, b)
_b2Distance = _Box2D._b2Distance
import collections
b2DistanceResult = collections.namedtuple('b2DistanceResult',
'pointA pointB distance iterations')
def b2Distance(shapeA=None, idxA=0, shapeB=None, idxB=0, transformA=None, transformB=None, useRadii=True):
"""
Compute the closest points between two shapes.
Can be called one of two ways:
+ b2Distance(b2DistanceInput)
This uses the b2DistanceInput structure, where you define your own
distance proxies
Or more conveniently using kwargs:
+ b2Distance(shapeA=.., idxA=0, shapeB=.., idxB=0, transformA=..,
transformB=.., useRadii=True)
Returns a namedtuple in the form:
b2DistanceResult(pointA=(ax, ay), pointB=(bx, by), distance,
iterations)
"""
if isinstance(shapeA, b2DistanceInput):
out = _b2Distance(shapeA)
else:
out = _b2Distance(shapeA, idxA, shapeB, idxB, transformA, transformB, useRadii)
return b2DistanceResult(pointA=tuple(out.pointA),
pointB=tuple(out.pointB),
distance=out.distance,
iterations=out.iterations)
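# Illustrative sketch (not part of the generated bindings): querying the closest
# points between two shapes with the kwargs form of b2Distance. The shapes,
# transforms and positions below are assumptions chosen only for this example;
# the function is defined but never called here.
def _example_b2Distance():
    shape_a = b2PolygonShape(box=(1, 1))        # 2x2 box centered at the origin
    shape_b = b2CircleShape(radius=0.5)
    xf_a = b2Transform()
    xf_a.SetIdentity()
    xf_b = b2Transform()
    xf_b.SetIdentity()
    xf_b.position = b2Vec2(3, 0)                # move the circle 3 units along x
    result = b2Distance(shapeA=shape_a, shapeB=shape_b,
                        transformA=xf_a, transformB=xf_b)
    # result is a b2DistanceResult namedtuple: pointA, pointB, distance, iterations
    return result.distance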
b2GetPointStates = _Box2D.b2GetPointStates
class b2ContactPoint(object):
r"""Proxy of C++ b2ContactPoint class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2ContactPoint_swiginit(self,_Box2D.new_b2ContactPoint())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ContactPoint
fixtureA = property(_Box2D.b2ContactPoint_fixtureA_get, _Box2D.b2ContactPoint_fixtureA_set, doc=r"""fixtureA : p.b2Fixture""")
fixtureB = property(_Box2D.b2ContactPoint_fixtureB_get, _Box2D.b2ContactPoint_fixtureB_set, doc=r"""fixtureB : p.b2Fixture""")
normal = property(_Box2D.b2ContactPoint_normal_get, _Box2D.b2ContactPoint_normal_set, doc=r"""normal : b2Vec2""")
position = property(_Box2D.b2ContactPoint_position_get, _Box2D.b2ContactPoint_position_set, doc=r"""position : b2Vec2""")
state = property(_Box2D.b2ContactPoint_state_get, _Box2D.b2ContactPoint_state_set, doc=r"""state : b2PointState""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactPoint___hash__)
def __repr__(self):
return _format_repr(self)
# Register b2ContactPoint in _Box2D:
_Box2D.b2ContactPoint_swigregister(b2ContactPoint)
b2Globals = _Box2D.b2Globals
_b2TimeOfImpact = _Box2D._b2TimeOfImpact
def b2TimeOfImpact(shapeA=None, idxA=0, shapeB=None, idxB=0, sweepA=None, sweepB=None, tMax=0.0):
"""
Compute the upper bound on time before two shapes penetrate. Time is represented as
a fraction between [0,tMax]. This uses a swept separating axis and may miss some intermediate,
non-tunneling collision. If you change the time interval, you should call this function
again.
Note: use b2Distance to compute the contact point and normal at the time of impact.
Can be called one of several ways:
+ b2TimeOfImpact(b2TOIInput) # utilizes the b2TOIInput structure, where you define your own proxies
Or utilizing kwargs:
+ b2TimeOfImpact(shapeA=a, shapeB=b, idxA=0, idxB=0, sweepA=sa, sweepB=sb, tMax=t)
Where idxA and idxB are optional and used only if the shapes are loops (they indicate which section to use.)
sweep[A,B] are of type b2Sweep.
Returns a tuple in the form:
(output state, time of impact)
Where output state is in b2TOIOutput.[
e_unknown,
e_failed,
e_overlapped,
e_touching,
e_separated ]
"""
if isinstance(shapeA, b2TOIInput):
toi_input = shapeA
out = _b2TimeOfImpact(toi_input)
else:
out = _b2TimeOfImpact(shapeA, idxA, shapeB, idxB, sweepA, sweepB, tMax)
return (out.state, out.t)
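# Illustrative sketch (not part of the generated bindings): the kwargs form of
# b2TimeOfImpact for two swept circles. The sweep values are made up for the
# example; in practice sweeps come from the solver state of moving bodies.
def _example_b2TimeOfImpact():
    shape_a = b2CircleShape(radius=1)
    shape_b = b2CircleShape(radius=1)
    sweep_a = b2Sweep(localCenter=b2Vec2(0, 0), c0=b2Vec2(0, 0),
                      c=b2Vec2(0, 0), a0=0, a=0, alpha0=0)     # stationary
    sweep_b = b2Sweep(localCenter=b2Vec2(0, 0), c0=b2Vec2(10, 0),
                      c=b2Vec2(-10, 0), a0=0, a=0, alpha0=0)   # moving left
    state, t = b2TimeOfImpact(shapeA=shape_a, shapeB=shape_b,
                              sweepA=sweep_a, sweepB=sweep_b, tMax=1.0)
    # state is one of the b2TOIOutput enums (e.g. e_touching); t is the impact time
    return state, t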
class b2AssertException(object):
r"""Proxy of C++ b2AssertException class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2AssertException___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self):
r"""__init__(b2AssertException self) -> b2AssertException"""
_Box2D.b2AssertException_swiginit(self, _Box2D.new_b2AssertException())
__swig_destroy__ = _Box2D.delete_b2AssertException
# Register b2AssertException in _Box2D:
_Box2D.b2AssertException_swigregister(b2AssertException)
b2_pi = _Box2D.b2_pi
b2_maxManifoldPoints = _Box2D.b2_maxManifoldPoints
b2_maxPolygonVertices = _Box2D.b2_maxPolygonVertices
b2_aabbExtension = _Box2D.b2_aabbExtension
b2_aabbMultiplier = _Box2D.b2_aabbMultiplier
b2_linearSlop = _Box2D.b2_linearSlop
b2_angularSlop = _Box2D.b2_angularSlop
b2_polygonRadius = _Box2D.b2_polygonRadius
b2_maxSubSteps = _Box2D.b2_maxSubSteps
b2_maxTOIContacts = _Box2D.b2_maxTOIContacts
b2_velocityThreshold = _Box2D.b2_velocityThreshold
b2_maxLinearCorrection = _Box2D.b2_maxLinearCorrection
b2_maxAngularCorrection = _Box2D.b2_maxAngularCorrection
b2_maxTranslation = _Box2D.b2_maxTranslation
b2_maxTranslationSquared = _Box2D.b2_maxTranslationSquared
b2_maxRotation = _Box2D.b2_maxRotation
b2_maxRotationSquared = _Box2D.b2_maxRotationSquared
b2_baumgarte = _Box2D.b2_baumgarte
b2_toiBaugarte = _Box2D.b2_toiBaugarte
b2_timeToSleep = _Box2D.b2_timeToSleep
b2_linearSleepTolerance = _Box2D.b2_linearSleepTolerance
b2_angularSleepTolerance = _Box2D.b2_angularSleepTolerance
b2Alloc = _Box2D.b2Alloc
b2Free = _Box2D.b2Free
b2Log = _Box2D.b2Log
class b2Version(object):
r"""Version numbering scheme. See http://en.wikipedia.org/wiki/Software_versioning"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
major = property(_Box2D.b2Version_major_get, _Box2D.b2Version_major_set, doc=r"""major : int32""")
minor = property(_Box2D.b2Version_minor_get, _Box2D.b2Version_minor_set, doc=r"""minor : int32""")
revision = property(_Box2D.b2Version_revision_get, _Box2D.b2Version_revision_set, doc=r"""revision : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Version___hash__)
def __repr__(self):
return _format_repr(self)
def __repr__(self):
return "b2Version(%s.%s.%s)" % (self.major, self.minor, self.revision)
def __init__(self, **kwargs):
_Box2D.b2Version_swiginit(self,_Box2D.new_b2Version())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2Version
# Register b2Version in _Box2D:
_Box2D.b2Version_swigregister(b2Version)
b2IsValid = _Box2D.b2IsValid
b2InvSqrt = _Box2D.b2InvSqrt
class b2Vec2(object):
r"""A 2D column vector."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
SetZero = _swig_new_instance_method(_Box2D.b2Vec2_SetZero)
Set = _swig_new_instance_method(_Box2D.b2Vec2_Set)
__neg__ = _swig_new_instance_method(_Box2D.b2Vec2___neg__)
__call__ = _swig_new_instance_method(_Box2D.b2Vec2___call__)
__add_vector = _swig_new_instance_method(_Box2D.b2Vec2___add_vector)
__sub_vector = _swig_new_instance_method(_Box2D.b2Vec2___sub_vector)
__mul_float = _swig_new_instance_method(_Box2D.b2Vec2___mul_float)
__Length = _swig_new_instance_method(_Box2D.b2Vec2___Length)
__LengthSquared = _swig_new_instance_method(_Box2D.b2Vec2___LengthSquared)
Normalize = _swig_new_instance_method(_Box2D.b2Vec2_Normalize)
__IsValid = _swig_new_instance_method(_Box2D.b2Vec2___IsValid)
__Skew = _swig_new_instance_method(_Box2D.b2Vec2___Skew)
x = property(_Box2D.b2Vec2_x_get, _Box2D.b2Vec2_x_set, doc=r"""x : float32""")
y = property(_Box2D.b2Vec2_y_get, _Box2D.b2Vec2_y_set, doc=r"""y : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Vec2___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, *args):
r"""
__init__(b2Vec2 self, float32 x, float32 y) -> b2Vec2
__init__(b2Vec2 self) -> b2Vec2
__init__(b2Vec2 self, b2Vec2 other) -> b2Vec2
Construct using coordinates.
"""
_Box2D.b2Vec2_swiginit(self, _Box2D.new_b2Vec2(*args))
__iter__ = lambda self: iter( (self.x, self.y) )
__eq__ = lambda self, other: self.__equ(other)
__ne__ = lambda self,other: not self.__equ(other)
def __repr__(self):
return "b2Vec2(%g,%g)" % (self.x, self.y)
def __len__(self):
return 2
def __neg__(self):
return b2Vec2(-self.x, -self.y)
def copy(self):
"""
Return a copy of the vector.
Remember that the following:
a = b2Vec2()
b = a
Does not copy the vector itself, but b now refers to a.
"""
return b2Vec2(self.x, self.y)
__copy__ = copy
def __iadd__(self, other):
self.__add_vector(other)
return self
def __isub__(self, other):
self.__sub_vector(other)
return self
def __imul__(self, a):
self.__mul_float(a)
return self
def __itruediv__(self, a):
self.__div_float(a)
return self
def __idiv__(self, a):
self.__div_float(a)
return self
def __set(self, x, y):
self.x = x
self.y = y
def __nonzero__(self):
return self.x!=0.0 or self.y!=0.0
tuple = property(lambda self: (self.x, self.y), lambda self, value: self.__set(*value))
length = property(__Length, None)
lengthSquared = property(__LengthSquared, None)
valid = property(__IsValid, None)
skew = property(__Skew, None)
cross = _swig_new_instance_method(_Box2D.b2Vec2_cross)
__getitem__ = _swig_new_instance_method(_Box2D.b2Vec2___getitem__)
__setitem__ = _swig_new_instance_method(_Box2D.b2Vec2___setitem__)
__equ = _swig_new_instance_method(_Box2D.b2Vec2___equ)
dot = _swig_new_instance_method(_Box2D.b2Vec2_dot)
__truediv__ = _swig_new_instance_method(_Box2D.b2Vec2___truediv__)
__div__ = _swig_new_instance_method(_Box2D.b2Vec2___div__)
__mul__ = _swig_new_instance_method(_Box2D.b2Vec2___mul__)
__add__ = _swig_new_instance_method(_Box2D.b2Vec2___add__)
__sub__ = _swig_new_instance_method(_Box2D.b2Vec2___sub__)
__rmul__ = _swig_new_instance_method(_Box2D.b2Vec2___rmul__)
__rdiv__ = _swig_new_instance_method(_Box2D.b2Vec2___rdiv__)
__div_float = _swig_new_instance_method(_Box2D.b2Vec2___div_float)
__swig_destroy__ = _Box2D.delete_b2Vec2
# Register b2Vec2 in _Box2D:
_Box2D.b2Vec2_swigregister(b2Vec2)
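# Illustrative sketch (not part of the generated bindings): the operators and
# properties wrapped above make b2Vec2 behave like a small 2D vector type.
def _example_b2Vec2_usage():
    a = b2Vec2(3, 4)
    b = b2Vec2(1, 0)
    summed = a + b                 # b2Vec2(4, 4)
    scaled = 2.0 * a               # b2Vec2(6, 8)
    return a.length, a.dot(b), a.cross(b), tuple(a), summed, scaled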
class b2Vec3(object):
r"""A 2D column vector with 3 elements."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
SetZero = _swig_new_instance_method(_Box2D.b2Vec3_SetZero)
Set = _swig_new_instance_method(_Box2D.b2Vec3_Set)
__neg__ = _swig_new_instance_method(_Box2D.b2Vec3___neg__)
__add_vector = _swig_new_instance_method(_Box2D.b2Vec3___add_vector)
__sub_vector = _swig_new_instance_method(_Box2D.b2Vec3___sub_vector)
__mul_float = _swig_new_instance_method(_Box2D.b2Vec3___mul_float)
x = property(_Box2D.b2Vec3_x_get, _Box2D.b2Vec3_x_set, doc=r"""x : float32""")
y = property(_Box2D.b2Vec3_y_get, _Box2D.b2Vec3_y_set, doc=r"""y : float32""")
z = property(_Box2D.b2Vec3_z_get, _Box2D.b2Vec3_z_set, doc=r"""z : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Vec3___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, *args):
r"""
__init__(b2Vec3 self, float32 x, float32 y, float32 z) -> b2Vec3
__init__(b2Vec3 self) -> b2Vec3
__init__(b2Vec3 self, b2Vec3 other) -> b2Vec3
__init__(b2Vec3 self, b2Vec2 other) -> b2Vec3
Construct using coordinates.
"""
_Box2D.b2Vec3_swiginit(self, _Box2D.new_b2Vec3(*args))
__iter__ = lambda self: iter( (self.x, self.y, self.z) )
__eq__ = lambda self, other: (self.x == other.x and self.y == other.y and self.z == other.z)
__ne__ = lambda self, other: (self.x != other.x or self.y != other.y or self.z != other.z)
def __repr__(self):
return "b2Vec3(%g,%g,%g)" % (self.x, self.y, self.z)
def __len__(self):
return 3
def __neg__(self):
return b2Vec3(-self.x, -self.y, -self.z)
def copy(self):
"""
Return a copy of the vector.
Remember that the following:
a = b2Vec3()
b = a
Does not copy the vector itself, but b now refers to a.
"""
return b2Vec3(self.x, self.y, self.z)
__copy__ = copy
def __iadd__(self, other):
self.__add_vector(other)
return self
def __isub__(self, other):
self.__sub_vector(other)
return self
def __imul__(self, a):
self.__mul_float(a)
return self
def __itruediv__(self, a):
self.__div_float(a)
return self
def __idiv__(self, a):
self.__div_float(a)
return self
def dot(self, v):
"""
Dot product with v (list/tuple or b2Vec3)
"""
if isinstance(v, (list, tuple)):
return self.x*v[0] + self.y*v[1] + self.z*v[2]
else:
return self.x*v.x + self.y*v.y + self.z*v.z
def __set(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __nonzero__(self):
return self.x!=0.0 or self.y!=0.0 or self.z!=0.0
tuple = property(lambda self: (self.x, self.y, self.z), lambda self, value: self.__set(*value))
length = property(_Box2D.b2Vec3___Length, None)
lengthSquared = property(_Box2D.b2Vec3___LengthSquared, None)
valid = property(_Box2D.b2Vec3___IsValid, None)
cross = _swig_new_instance_method(_Box2D.b2Vec3_cross)
__getitem__ = _swig_new_instance_method(_Box2D.b2Vec3___getitem__)
__setitem__ = _swig_new_instance_method(_Box2D.b2Vec3___setitem__)
__IsValid = _swig_new_instance_method(_Box2D.b2Vec3___IsValid)
__Length = _swig_new_instance_method(_Box2D.b2Vec3___Length)
__LengthSquared = _swig_new_instance_method(_Box2D.b2Vec3___LengthSquared)
__truediv__ = _swig_new_instance_method(_Box2D.b2Vec3___truediv__)
__div__ = _swig_new_instance_method(_Box2D.b2Vec3___div__)
__mul__ = _swig_new_instance_method(_Box2D.b2Vec3___mul__)
__add__ = _swig_new_instance_method(_Box2D.b2Vec3___add__)
__sub__ = _swig_new_instance_method(_Box2D.b2Vec3___sub__)
__rmul__ = _swig_new_instance_method(_Box2D.b2Vec3___rmul__)
__rdiv__ = _swig_new_instance_method(_Box2D.b2Vec3___rdiv__)
__div_float = _swig_new_instance_method(_Box2D.b2Vec3___div_float)
__swig_destroy__ = _Box2D.delete_b2Vec3
# Register b2Vec3 in _Box2D:
_Box2D.b2Vec3_swigregister(b2Vec3)
class b2Mat22(object):
r"""A 2-by-2 matrix. Stored in column-major order."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
SetIdentity = _swig_new_instance_method(_Box2D.b2Mat22_SetIdentity)
SetZero = _swig_new_instance_method(_Box2D.b2Mat22_SetZero)
__GetInverse = _swig_new_instance_method(_Box2D.b2Mat22___GetInverse)
Solve = _swig_new_instance_method(_Box2D.b2Mat22_Solve)
col1 = property(_Box2D.b2Mat22_col1_get, _Box2D.b2Mat22_col1_set, doc=r"""col1 : b2Vec2""")
col2 = property(_Box2D.b2Mat22_col2_get, _Box2D.b2Mat22_col2_set, doc=r"""col2 : b2Vec2""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Mat22___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, *args):
r"""
__init__(b2Mat22 self, b2Vec2 c1, b2Vec2 c2) -> b2Mat22
__init__(b2Mat22 self, float32 a11, float32 a12, float32 a21, float32 a22) -> b2Mat22
__init__(b2Mat22 self) -> b2Mat22
Construct this matrix using an angle. This matrix becomes an orthonormal rotation matrix.
"""
_Box2D.b2Mat22_swiginit(self, _Box2D.new_b2Mat22(*args))
__GetAngle = _swig_new_instance_method(_Box2D.b2Mat22___GetAngle)
__SetAngle = _swig_new_instance_method(_Box2D.b2Mat22___SetAngle)
# Read-only
inverse = property(__GetInverse, None)
angle = property(__GetAngle, __SetAngle)
ex = property(lambda self: self.col1,
lambda self, v: setattr(self, 'col1', v))
ey = property(lambda self: self.col2,
lambda self, v: setattr(self, 'col2', v))
set = __SetAngle
__mul__ = _swig_new_instance_method(_Box2D.b2Mat22___mul__)
__add__ = _swig_new_instance_method(_Box2D.b2Mat22___add__)
__sub__ = _swig_new_instance_method(_Box2D.b2Mat22___sub__)
__iadd = _swig_new_instance_method(_Box2D.b2Mat22___iadd)
__isub = _swig_new_instance_method(_Box2D.b2Mat22___isub)
__swig_destroy__ = _Box2D.delete_b2Mat22
# Register b2Mat22 in _Box2D:
_Box2D.b2Mat22_swigregister(b2Mat22)
class b2Mat33(object):
r"""A 3-by-3 matrix. Stored in column-major order."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
SetZero = _swig_new_instance_method(_Box2D.b2Mat33_SetZero)
Solve33 = _swig_new_instance_method(_Box2D.b2Mat33_Solve33)
Solve22 = _swig_new_instance_method(_Box2D.b2Mat33_Solve22)
GetInverse22 = _swig_new_instance_method(_Box2D.b2Mat33_GetInverse22)
GetSymInverse33 = _swig_new_instance_method(_Box2D.b2Mat33_GetSymInverse33)
col1 = property(_Box2D.b2Mat33_col1_get, _Box2D.b2Mat33_col1_set, doc=r"""col1 : b2Vec3""")
col2 = property(_Box2D.b2Mat33_col2_get, _Box2D.b2Mat33_col2_set, doc=r"""col2 : b2Vec3""")
col3 = property(_Box2D.b2Mat33_col3_get, _Box2D.b2Mat33_col3_set, doc=r"""col3 : b2Vec3""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Mat33___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, *args):
r"""
__init__(b2Mat33 self, b2Vec3 c1, b2Vec3 c2, b2Vec3 c3) -> b2Mat33
__init__(b2Mat33 self) -> b2Mat33
Construct this matrix using columns.
"""
_Box2D.b2Mat33_swiginit(self, _Box2D.new_b2Mat33(*args))
ex = property(lambda self: self.col1, lambda self, v: setattr(self, 'col1', v))
ey = property(lambda self: self.col2, lambda self, v: setattr(self, 'col2', v))
ez = property(lambda self: self.col3, lambda self, v: setattr(self, 'col3', v))
__mul__ = _swig_new_instance_method(_Box2D.b2Mat33___mul__)
__add__ = _swig_new_instance_method(_Box2D.b2Mat33___add__)
__sub__ = _swig_new_instance_method(_Box2D.b2Mat33___sub__)
__iadd = _swig_new_instance_method(_Box2D.b2Mat33___iadd)
__isub = _swig_new_instance_method(_Box2D.b2Mat33___isub)
__swig_destroy__ = _Box2D.delete_b2Mat33
# Register b2Mat33 in _Box2D:
_Box2D.b2Mat33_swigregister(b2Mat33)
class b2Rot(object):
r"""Proxy of C++ b2Rot class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(b2Rot self) -> b2Rot
__init__(b2Rot self, float32 angle) -> b2Rot
"""
_Box2D.b2Rot_swiginit(self, _Box2D.new_b2Rot(*args))
__SetAngle = _swig_new_instance_method(_Box2D.b2Rot___SetAngle)
SetIdentity = _swig_new_instance_method(_Box2D.b2Rot_SetIdentity)
__GetAngle = _swig_new_instance_method(_Box2D.b2Rot___GetAngle)
GetXAxis = _swig_new_instance_method(_Box2D.b2Rot_GetXAxis)
GetYAxis = _swig_new_instance_method(_Box2D.b2Rot_GetYAxis)
s = property(_Box2D.b2Rot_s_get, _Box2D.b2Rot_s_set, doc=r"""s : float32""")
c = property(_Box2D.b2Rot_c_get, _Box2D.b2Rot_c_set, doc=r"""c : float32""")
angle = property(__GetAngle, __SetAngle)
x_axis = property(GetXAxis, None)
y_axis = property(GetYAxis, None)
__mul__ = _swig_new_instance_method(_Box2D.b2Rot___mul__)
__swig_destroy__ = _Box2D.delete_b2Rot
# Register b2Rot in _Box2D:
_Box2D.b2Rot_swigregister(b2Rot)
class b2Transform(object):
r"""A transform contains translation and rotation. It is used to represent the position and orientation of rigid frames."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(b2Transform self) -> b2Transform
__init__(b2Transform self, b2Vec2 position, b2Rot rotation) -> b2Transform
Initialize using a position vector and a rotation matrix.
"""
_Box2D.b2Transform_swiginit(self, _Box2D.new_b2Transform(*args))
SetIdentity = _swig_new_instance_method(_Box2D.b2Transform_SetIdentity)
Set = _swig_new_instance_method(_Box2D.b2Transform_Set)
position = property(_Box2D.b2Transform_position_get, _Box2D.b2Transform_position_set, doc=r"""position : b2Vec2""")
q = property(_Box2D.b2Transform_q_get, _Box2D.b2Transform_q_set, doc=r"""q : b2Rot""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Transform___hash__)
def __repr__(self):
return _format_repr(self)
__get_rotation_matrix = _swig_new_instance_method(_Box2D.b2Transform___get_rotation_matrix)
def __get_angle(self):
return self.q.angle
def __set_angle(self, angle):
self.q.angle = angle
def __set_rotation_matrix(self, rot_matrix):
self.q.angle = rot_matrix.angle
angle = property(__get_angle, __set_angle)
R = property(__get_rotation_matrix, __set_rotation_matrix)
__mul__ = _swig_new_instance_method(_Box2D.b2Transform___mul__)
__swig_destroy__ = _Box2D.delete_b2Transform
# Register b2Transform in _Box2D:
_Box2D.b2Transform_swigregister(b2Transform)
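# Illustrative sketch (not part of the generated bindings): a transform carries a
# position and a rotation (stored as a b2Rot in `q`); this assumes the wrapped
# __mul__ maps a local-space b2Vec2 into world coordinates, and the values below
# are made up for the example.
def _example_b2Transform_usage():
    xf = b2Transform()
    xf.SetIdentity()
    xf.position = b2Vec2(2, 0)
    xf.angle = b2_pi / 2             # quarter turn, using the module constant above
    world_point = xf * b2Vec2(1, 0)  # local (1, 0) -> approximately world (2, 1)
    return world_point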
class b2Sweep(object):
r"""This describes the motion of a body/shape for TOI computation. Shapes are defined with respect to the body origin, which may no coincide with the center of mass. However, to support dynamics we must interpolate the center of mass position."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
Advance = _swig_new_instance_method(_Box2D.b2Sweep_Advance)
Normalize = _swig_new_instance_method(_Box2D.b2Sweep_Normalize)
localCenter = property(_Box2D.b2Sweep_localCenter_get, _Box2D.b2Sweep_localCenter_set, doc=r"""localCenter : b2Vec2""")
c0 = property(_Box2D.b2Sweep_c0_get, _Box2D.b2Sweep_c0_set, doc=r"""c0 : b2Vec2""")
c = property(_Box2D.b2Sweep_c_get, _Box2D.b2Sweep_c_set, doc=r"""c : b2Vec2""")
a0 = property(_Box2D.b2Sweep_a0_get, _Box2D.b2Sweep_a0_set, doc=r"""a0 : float32""")
a = property(_Box2D.b2Sweep_a_get, _Box2D.b2Sweep_a_set, doc=r"""a : float32""")
alpha0 = property(_Box2D.b2Sweep_alpha0_get, _Box2D.b2Sweep_alpha0_set, doc=r"""alpha0 : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Sweep___hash__)
def __repr__(self):
return _format_repr(self)
GetTransform = _swig_new_instance_method(_Box2D.b2Sweep_GetTransform)
def __init__(self, **kwargs):
_Box2D.b2Sweep_swiginit(self,_Box2D.new_b2Sweep())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2Sweep
# Register b2Sweep in _Box2D:
_Box2D.b2Sweep_swigregister(b2Sweep)
b2DistanceSquared = _Box2D.b2DistanceSquared
b2Dot = _Box2D.b2Dot
b2Cross = _Box2D.b2Cross
b2Mul22 = _Box2D.b2Mul22
b2Mul = _Box2D.b2Mul
b2MulT = _Box2D.b2MulT
b2Abs = _Box2D.b2Abs
b2Min = _Box2D.b2Min
b2Max = _Box2D.b2Max
b2Clamp = _Box2D.b2Clamp
b2NextPowerOfTwo = _Box2D.b2NextPowerOfTwo
b2IsPowerOfTwo = _Box2D.b2IsPowerOfTwo
class b2ContactFeature(object):
r"""The features that intersect to form the contact point This must be 4 bytes or less."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
e_vertex = _Box2D.b2ContactFeature_e_vertex
e_face = _Box2D.b2ContactFeature_e_face
indexA = property(_Box2D.b2ContactFeature_indexA_get, _Box2D.b2ContactFeature_indexA_set, doc=r"""indexA : uint8""")
indexB = property(_Box2D.b2ContactFeature_indexB_get, _Box2D.b2ContactFeature_indexB_set, doc=r"""indexB : uint8""")
typeA = property(_Box2D.b2ContactFeature_typeA_get, _Box2D.b2ContactFeature_typeA_set, doc=r"""typeA : uint8""")
typeB = property(_Box2D.b2ContactFeature_typeB_get, _Box2D.b2ContactFeature_typeB_set, doc=r"""typeB : uint8""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactFeature___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self):
r"""
__init__(b2ContactFeature self) -> b2ContactFeature
The features that intersect to form the contact point This must be 4 bytes or less.
"""
_Box2D.b2ContactFeature_swiginit(self, _Box2D.new_b2ContactFeature())
__swig_destroy__ = _Box2D.delete_b2ContactFeature
# Register b2ContactFeature in _Box2D:
_Box2D.b2ContactFeature_swigregister(b2ContactFeature)
b2Vec2_zero = b2Globals.b2Vec2_zero
b2_nullFeature = b2Globals.b2_nullFeature
class b2ContactID(object):
r"""Proxy of C++ b2ContactID class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
cf = property(_Box2D.b2ContactID_cf_get, _Box2D.b2ContactID_cf_set, doc=r"""cf : b2ContactFeature""")
key = property(_Box2D.b2ContactID_key_get, _Box2D.b2ContactID_key_set, doc=r"""key : uint32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactID___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2ContactID_swiginit(self,_Box2D.new_b2ContactID())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ContactID
# Register b2ContactID in _Box2D:
_Box2D.b2ContactID_swigregister(b2ContactID)
class b2ManifoldPoint(object):
r"""A manifold point is a contact point belonging to a contact manifold. It holds details related to the geometry and dynamics of the contact points. The local point usage depends on the manifold type: -e_circles: the local center of circleB -e_faceA: the local center of cirlceB or the clip point of polygonB -e_faceB: the clip point of polygonA This structure is stored across time steps, so we keep it small. Note: the impulses are used for internal caching and may not provide reliable contact forces, especially for high speed collisions."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
localPoint = property(_Box2D.b2ManifoldPoint_localPoint_get, _Box2D.b2ManifoldPoint_localPoint_set, doc=r"""localPoint : b2Vec2""")
normalImpulse = property(_Box2D.b2ManifoldPoint_normalImpulse_get, _Box2D.b2ManifoldPoint_normalImpulse_set, doc=r"""normalImpulse : float32""")
tangentImpulse = property(_Box2D.b2ManifoldPoint_tangentImpulse_get, _Box2D.b2ManifoldPoint_tangentImpulse_set, doc=r"""tangentImpulse : float32""")
id = property(_Box2D.b2ManifoldPoint_id_get, _Box2D.b2ManifoldPoint_id_set, doc=r"""id : b2ContactID""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ManifoldPoint___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2ManifoldPoint_swiginit(self,_Box2D.new_b2ManifoldPoint())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ManifoldPoint
# Register b2ManifoldPoint in _Box2D:
_Box2D.b2ManifoldPoint_swigregister(b2ManifoldPoint)
class b2Manifold(object):
r"""
    A manifold for two touching convex shapes. Box2D supports multiple types of contact:
    - clip point versus plane with radius
    - point versus point with radius (circles)
    The local point usage depends on the manifold type:
    - e_circles: the local center of circleA
    - e_faceA: the center of faceA
    - e_faceB: the center of faceB
    Similarly the local normal usage:
    - e_circles: not used
    - e_faceA: the normal on polygonA
    - e_faceB: the normal on polygonB
    We store contacts in this way so that position correction can account for movement, which is
    critical for continuous physics. All contact scenarios must be expressed in one of these
    types. This structure is stored across time steps, so we keep it small.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
e_circles = _Box2D.b2Manifold_e_circles
e_faceA = _Box2D.b2Manifold_e_faceA
e_faceB = _Box2D.b2Manifold_e_faceB
localNormal = property(_Box2D.b2Manifold_localNormal_get, _Box2D.b2Manifold_localNormal_set, doc=r"""localNormal : b2Vec2""")
localPoint = property(_Box2D.b2Manifold_localPoint_get, _Box2D.b2Manifold_localPoint_set, doc=r"""localPoint : b2Vec2""")
type_ = property(_Box2D.b2Manifold_type__get, _Box2D.b2Manifold_type__set, doc=r"""type_ : b2Manifold::Type""")
pointCount = property(_Box2D.b2Manifold_pointCount_get, _Box2D.b2Manifold_pointCount_set, doc=r"""pointCount : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Manifold___hash__)
def __repr__(self):
return _format_repr(self)
def __GetPoints(self):
return [self.__GetPoint(i) for i in range(self.pointCount)]
points = property(__GetPoints, None)
__GetPoint = _swig_new_instance_method(_Box2D.b2Manifold___GetPoint)
def __init__(self, **kwargs):
_Box2D.b2Manifold_swiginit(self,_Box2D.new_b2Manifold())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2Manifold
# Register b2Manifold in _Box2D:
_Box2D.b2Manifold_swigregister(b2Manifold)
class b2WorldManifold(object):
r"""This is used to compute the current state of a contact manifold."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
Initialize = _swig_new_instance_method(_Box2D.b2WorldManifold_Initialize)
normal = property(_Box2D.b2WorldManifold_normal_get, _Box2D.b2WorldManifold_normal_set, doc=r"""normal : b2Vec2""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2WorldManifold___hash__)
def __repr__(self):
return _format_repr(self)
__get_points = _swig_new_instance_method(_Box2D.b2WorldManifold___get_points)
points = property(__get_points, None)
def __init__(self, **kwargs):
_Box2D.b2WorldManifold_swiginit(self,_Box2D.new_b2WorldManifold())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2WorldManifold
# Register b2WorldManifold in _Box2D:
_Box2D.b2WorldManifold_swigregister(b2WorldManifold)
b2_nullState = _Box2D.b2_nullState
b2_addState = _Box2D.b2_addState
b2_persistState = _Box2D.b2_persistState
b2_removeState = _Box2D.b2_removeState
class b2ClipVertex(object):
r"""Used for computing contact manifolds."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
v = property(_Box2D.b2ClipVertex_v_get, _Box2D.b2ClipVertex_v_set, doc=r"""v : b2Vec2""")
id = property(_Box2D.b2ClipVertex_id_get, _Box2D.b2ClipVertex_id_set, doc=r"""id : b2ContactID""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ClipVertex___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2ClipVertex_swiginit(self,_Box2D.new_b2ClipVertex())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ClipVertex
# Register b2ClipVertex in _Box2D:
_Box2D.b2ClipVertex_swigregister(b2ClipVertex)
class b2RayCastInput(object):
r"""Ray-cast input data. The ray extends from p1 to p1 + maxFraction * (p2 - p1)."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
p1 = property(_Box2D.b2RayCastInput_p1_get, _Box2D.b2RayCastInput_p1_set, doc=r"""p1 : b2Vec2""")
p2 = property(_Box2D.b2RayCastInput_p2_get, _Box2D.b2RayCastInput_p2_set, doc=r"""p2 : b2Vec2""")
maxFraction = property(_Box2D.b2RayCastInput_maxFraction_get, _Box2D.b2RayCastInput_maxFraction_set, doc=r"""maxFraction : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RayCastInput___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2RayCastInput_swiginit(self,_Box2D.new_b2RayCastInput())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2RayCastInput
# Register b2RayCastInput in _Box2D:
_Box2D.b2RayCastInput_swigregister(b2RayCastInput)
class b2RayCastOutput(object):
r"""Ray-cast output data. The ray hits at p1 + fraction * (p2 - p1), where p1 and p2 come from b2RayCastInput."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
normal = property(_Box2D.b2RayCastOutput_normal_get, _Box2D.b2RayCastOutput_normal_set, doc=r"""normal : b2Vec2""")
fraction = property(_Box2D.b2RayCastOutput_fraction_get, _Box2D.b2RayCastOutput_fraction_set, doc=r"""fraction : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RayCastOutput___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2RayCastOutput_swiginit(self,_Box2D.new_b2RayCastOutput())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2RayCastOutput
# Register b2RayCastOutput in _Box2D:
_Box2D.b2RayCastOutput_swigregister(b2RayCastOutput)
class b2AABB(object):
r"""An axis aligned bounding box."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__IsValid = _swig_new_instance_method(_Box2D.b2AABB___IsValid)
__GetCenter = _swig_new_instance_method(_Box2D.b2AABB___GetCenter)
__GetExtents = _swig_new_instance_method(_Box2D.b2AABB___GetExtents)
__GetPerimeter = _swig_new_instance_method(_Box2D.b2AABB___GetPerimeter)
Combine = _swig_new_instance_method(_Box2D.b2AABB_Combine)
RayCast = _swig_new_instance_method(_Box2D.b2AABB_RayCast)
lowerBound = property(_Box2D.b2AABB_lowerBound_get, _Box2D.b2AABB_lowerBound_set, doc=r"""lowerBound : b2Vec2""")
upperBound = property(_Box2D.b2AABB_upperBound_get, _Box2D.b2AABB_upperBound_set, doc=r"""upperBound : b2Vec2""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2AABB___hash__)
def __repr__(self):
return _format_repr(self)
# Read-only
valid = property(__IsValid, None)
extents = property(__GetExtents, None)
center = property(__GetCenter, None)
perimeter = property(__GetPerimeter, None)
__contains__ = _swig_new_instance_method(_Box2D.b2AABB___contains__)
overlaps = _swig_new_instance_method(_Box2D.b2AABB_overlaps)
def __init__(self, **kwargs):
_Box2D.b2AABB_swiginit(self,_Box2D.new_b2AABB())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2AABB
# Register b2AABB in _Box2D:
_Box2D.b2AABB_swigregister(b2AABB)
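# Illustrative sketch (not part of the generated bindings): b2AABB supports
# keyword construction plus the __contains__ and overlaps() helpers wrapped
# above; the bounds below are made up for the example.
def _example_b2AABB_usage():
    box_a = b2AABB(lowerBound=b2Vec2(0, 0), upperBound=b2Vec2(2, 2))
    box_b = b2AABB(lowerBound=b2Vec2(1, 1), upperBound=b2Vec2(3, 3))
    return (b2Vec2(1, 1) in box_a,   # point containment test
            box_a.overlaps(box_b),   # AABB/AABB overlap test
            box_a.center, box_a.perimeter)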
b2CollideCircles = _Box2D.b2CollideCircles
b2CollidePolygonAndCircle = _Box2D.b2CollidePolygonAndCircle
b2CollidePolygons = _Box2D.b2CollidePolygons
b2CollideEdgeAndCircle = _Box2D.b2CollideEdgeAndCircle
b2CollideEdgeAndPolygon = _Box2D.b2CollideEdgeAndPolygon
b2ClipSegmentToLine = _Box2D.b2ClipSegmentToLine
b2TestOverlap = _Box2D.b2TestOverlap
class _b2Vec2Array(object):
r"""Proxy of C++ _b2Vec2Array class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, nelements):
r"""__init__(_b2Vec2Array self, size_t nelements) -> _b2Vec2Array"""
_Box2D._b2Vec2Array_swiginit(self, _Box2D.new__b2Vec2Array(nelements))
__swig_destroy__ = _Box2D.delete__b2Vec2Array
__getitem__ = _swig_new_instance_method(_Box2D._b2Vec2Array___getitem__)
__setitem__ = _swig_new_instance_method(_Box2D._b2Vec2Array___setitem__)
cast = _swig_new_instance_method(_Box2D._b2Vec2Array_cast)
frompointer = _swig_new_static_method(_Box2D._b2Vec2Array_frompointer)
# Register _b2Vec2Array in _Box2D:
_Box2D._b2Vec2Array_swigregister(_b2Vec2Array)
_b2Vec2Array_frompointer = _Box2D._b2Vec2Array_frompointer
e_convertVertices = _Box2D.e_convertVertices
class b2Color(object):
r"""Color for debug drawing. Each value has the range [0,1]."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
Set = _swig_new_instance_method(_Box2D.b2Color_Set)
r = property(_Box2D.b2Color_r_get, _Box2D.b2Color_r_set, doc=r"""r : float32""")
g = property(_Box2D.b2Color_g_get, _Box2D.b2Color_g_set, doc=r"""g : float32""")
b = property(_Box2D.b2Color_b_get, _Box2D.b2Color_b_set, doc=r"""b : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Color___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, *args):
r"""
__init__(b2Color self) -> b2Color
__init__(b2Color self, float32 r, float32 g, float32 b) -> b2Color
__init__(b2Color self, b2Color other) -> b2Color
Color for debug drawing. Each value has the range [0,1].
"""
_Box2D.b2Color_swiginit(self, _Box2D.new_b2Color(*args))
__get_bytes = _swig_new_instance_method(_Box2D.b2Color___get_bytes)
__iter__ = lambda self: iter((self.r, self.g, self.b))
__eq__ = lambda self, other: self.__equ(other)
__ne__ = lambda self,other: not self.__equ(other)
def __repr__(self):
return "b2Color(%g,%g,%g)" % (self.r, self.g, self.b)
def __len__(self):
return 3
def __copy__(self):
return b2Color(self.r, self.g, self.b)
def copy(self):
return b2Color(self.r, self.g, self.b)
def __set_bytes(self, value):
if len(value) != 3:
raise ValueError('Expected length 3 list')
self.r, self.g, self.b = value[0]/255, value[1]/255, value[2]/255
def __set_tuple(self, value):
if len(value) != 3:
raise ValueError('Expected length 3 list')
self.r, self.g, self.b = value[0], value[1], value[2]
def __nonzero__(self):
return self.r!=0.0 or self.g!=0.0 or self.b!=0.0
list = property(lambda self: list(self), __set_tuple)
bytes = property(__get_bytes, __set_bytes)
__getitem__ = _swig_new_instance_method(_Box2D.b2Color___getitem__)
__setitem__ = _swig_new_instance_method(_Box2D.b2Color___setitem__)
__truediv__ = _swig_new_instance_method(_Box2D.b2Color___truediv__)
__add__ = _swig_new_instance_method(_Box2D.b2Color___add__)
__sub__ = _swig_new_instance_method(_Box2D.b2Color___sub__)
__div__ = _swig_new_instance_method(_Box2D.b2Color___div__)
__rmul__ = _swig_new_instance_method(_Box2D.b2Color___rmul__)
__mul__ = _swig_new_instance_method(_Box2D.b2Color___mul__)
__isub = _swig_new_instance_method(_Box2D.b2Color___isub)
__itruediv = _swig_new_instance_method(_Box2D.b2Color___itruediv)
__idiv = _swig_new_instance_method(_Box2D.b2Color___idiv)
__imul = _swig_new_instance_method(_Box2D.b2Color___imul)
__iadd = _swig_new_instance_method(_Box2D.b2Color___iadd)
__equ = _swig_new_instance_method(_Box2D.b2Color___equ)
__swig_destroy__ = _Box2D.delete_b2Color
# Register b2Color in _Box2D:
_Box2D.b2Color_swigregister(b2Color)
class b2Draw(object):
r"""Implement and register this class with a b2Worldto provide debug drawing of physics entities in your game."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
if self.__class__ == b2Draw:
_self = None
else:
_self = self
_Box2D.b2Draw_swiginit(self,_Box2D.new_b2Draw(_self, ))
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2Draw
e_shapeBit = _Box2D.b2Draw_e_shapeBit
e_jointBit = _Box2D.b2Draw_e_jointBit
e_aabbBit = _Box2D.b2Draw_e_aabbBit
e_pairBit = _Box2D.b2Draw_e_pairBit
e_centerOfMassBit = _Box2D.b2Draw_e_centerOfMassBit
__SetFlags = _swig_new_instance_method(_Box2D.b2Draw___SetFlags)
__GetFlags = _swig_new_instance_method(_Box2D.b2Draw___GetFlags)
AppendFlags = _swig_new_instance_method(_Box2D.b2Draw_AppendFlags)
ClearFlags = _swig_new_instance_method(_Box2D.b2Draw_ClearFlags)
DrawPolygon = _swig_new_instance_method(_Box2D.b2Draw_DrawPolygon)
DrawSolidPolygon = _swig_new_instance_method(_Box2D.b2Draw_DrawSolidPolygon)
DrawCircle = _swig_new_instance_method(_Box2D.b2Draw_DrawCircle)
DrawSolidCircle = _swig_new_instance_method(_Box2D.b2Draw_DrawSolidCircle)
DrawSegment = _swig_new_instance_method(_Box2D.b2Draw_DrawSegment)
DrawTransform = _swig_new_instance_method(_Box2D.b2Draw_DrawTransform)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Draw___hash__)
def __repr__(self):
return _format_repr(self)
_flag_entries = [
['drawShapes', e_shapeBit],
['drawJoints', e_jointBit ],
['drawAABBs', e_aabbBit ],
['drawPairs', e_pairBit ],
['drawCOMs', e_centerOfMassBit ],
['convertVertices', e_convertVertices ],
]
def _SetFlags(self, value):
flags = 0
for name_, mask in self._flag_entries:
if name_ in value and value[name_]:
flags |= mask
self.__SetFlags(flags)
def _GetFlags(self):
flags = self.__GetFlags()
ret={}
for name_, mask in self._flag_entries:
ret[name_]=((flags & mask)==mask)
return ret
flags=property(_GetFlags, _SetFlags, doc='Sets whether or not shapes, joints, etc. will be drawn.')
def __disown__(self):
self.this.disown()
_Box2D.disown_b2Draw(self)
return weakref.proxy(self)
# Register b2Draw in _Box2D:
_Box2D.b2Draw_swigregister(b2Draw)
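# Usage sketch for b2Draw (comments only). The 'flags' property maps a dict of
# booleans onto the e_*Bit masks, so debug-draw categories can be toggled
# without touching the raw bitmask; real renderers override the Draw* methods:
#
#     class NullDraw(b2Draw):
#         pass    # override DrawPolygon, DrawSolidCircle, etc. to actually render
#
#     draw = NullDraw()
#     draw.flags = dict(drawShapes=True, drawJoints=True)
#     assert draw.flags['drawShapes'] and not draw.flags['drawAABBs']
#     # world.renderer = draw   # attaching to a b2World is assumed (pybox2d convention)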
class b2DrawExtended(b2Draw):
r"""Proxy of C++ b2DrawExtended class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
convertVertices = property(_Box2D.b2DrawExtended_convertVertices_get, _Box2D.b2DrawExtended_convertVertices_set, doc=r"""convertVertices : bool""")
center = property(_Box2D.b2DrawExtended_center_get, _Box2D.b2DrawExtended_center_set, doc=r"""center : b2Vec2""")
offset = property(_Box2D.b2DrawExtended_offset_get, _Box2D.b2DrawExtended_offset_set, doc=r"""offset : b2Vec2""")
zoom = property(_Box2D.b2DrawExtended_zoom_get, _Box2D.b2DrawExtended_zoom_set, doc=r"""zoom : float32""")
screenSize = property(_Box2D.b2DrawExtended_screenSize_get, _Box2D.b2DrawExtended_screenSize_set, doc=r"""screenSize : b2Vec2""")
flipY = property(_Box2D.b2DrawExtended_flipY_get, _Box2D.b2DrawExtended_flipY_set, doc=r"""flipY : bool""")
flipX = property(_Box2D.b2DrawExtended_flipX_get, _Box2D.b2DrawExtended_flipX_set, doc=r"""flipX : bool""")
__Convert = _swig_new_instance_method(_Box2D.b2DrawExtended___Convert)
to_screen = _swig_new_instance_method(_Box2D.b2DrawExtended_to_screen)
DrawPolygon = _swig_new_instance_method(_Box2D.b2DrawExtended_DrawPolygon)
DrawSolidPolygon = _swig_new_instance_method(_Box2D.b2DrawExtended_DrawSolidPolygon)
DrawCircle = _swig_new_instance_method(_Box2D.b2DrawExtended_DrawCircle)
DrawSolidCircle = _swig_new_instance_method(_Box2D.b2DrawExtended_DrawSolidCircle)
DrawSegment = _swig_new_instance_method(_Box2D.b2DrawExtended_DrawSegment)
DrawTransform = _swig_new_instance_method(_Box2D.b2DrawExtended_DrawTransform)
__SetFlags = _swig_new_instance_method(_Box2D.b2DrawExtended___SetFlags)
__swig_destroy__ = _Box2D.delete_b2DrawExtended
def __init__(self, **kwargs):
if self.__class__ == b2DrawExtended:
_self = None
else:
_self = self
_Box2D.b2DrawExtended_swiginit(self,_Box2D.new_b2DrawExtended(_self, ))
_init_kwargs(self, **kwargs)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DrawExtended___hash__)
def __repr__(self):
return _format_repr(self)
def __disown__(self):
self.this.disown()
_Box2D.disown_b2DrawExtended(self)
return weakref.proxy(self)
# Register b2DrawExtended in _Box2D:
_Box2D.b2DrawExtended_swigregister(b2DrawExtended)
class b2MassData(object):
r"""This holds the mass data computed for a shape."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
mass = property(_Box2D.b2MassData_mass_get, _Box2D.b2MassData_mass_set, doc=r"""mass : float32""")
center = property(_Box2D.b2MassData_center_get, _Box2D.b2MassData_center_set, doc=r"""center : b2Vec2""")
I = property(_Box2D.b2MassData_I_get, _Box2D.b2MassData_I_set, doc=r"""I : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2MassData___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2MassData_swiginit(self,_Box2D.new_b2MassData())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2MassData
# Register b2MassData in _Box2D:
_Box2D.b2MassData_swigregister(b2MassData)
b2_chunkSize = b2Globals.b2_chunkSize
b2_maxBlockSize = b2Globals.b2_maxBlockSize
b2_blockSizes = b2Globals.b2_blockSizes
b2_chunkArrayIncrement = b2Globals.b2_chunkArrayIncrement
class b2Shape(object):
r"""A shape is used for collision detection. You can create a shape however you like. Shapes used for simulation in b2Worldare created automatically when a b2Fixtureis created. Shapes may encapsulate a one or more child shapes."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
e_circle = _Box2D.b2Shape_e_circle
e_edge = _Box2D.b2Shape_e_edge
e_polygon = _Box2D.b2Shape_e_polygon
e_chain = _Box2D.b2Shape_e_chain
e_typeCount = _Box2D.b2Shape_e_typeCount
__swig_destroy__ = _Box2D.delete_b2Shape
__GetType = _swig_new_instance_method(_Box2D.b2Shape___GetType)
__GetChildCount = _swig_new_instance_method(_Box2D.b2Shape___GetChildCount)
TestPoint = _swig_new_instance_method(_Box2D.b2Shape_TestPoint)
RayCast = _swig_new_instance_method(_Box2D.b2Shape_RayCast)
__ComputeAABB = _swig_new_instance_method(_Box2D.b2Shape___ComputeAABB)
__ComputeMass = _swig_new_instance_method(_Box2D.b2Shape___ComputeMass)
radius = property(_Box2D.b2Shape_radius_get, _Box2D.b2Shape_radius_set, doc=r"""radius : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Shape___hash__)
def __repr__(self):
return _format_repr(self)
__eq__ = b2ShapeCompare
__ne__ = lambda self,other: not b2ShapeCompare(self,other)
# Read-only
type = property(__GetType, None)
def getAsType(self):
return self
@property
def childCount(self):
"""
Get the number of child primitives.
"""
return self.__GetChildCount()
def getAABB(self, transform, childIndex):
"""
Given a transform, compute the associated axis aligned bounding box for a child shape.
"""
if childIndex >= self.childCount:
raise ValueError('Child index should be less than childCount=%d' % self.childCount)
aabb=b2AABB()
self.__ComputeAABB(aabb, transform, childIndex)
return aabb
def getMass(self, density):
"""
Compute the mass properties of this shape using its dimensions and density.
The inertia tensor is computed about the local origin.
"""
m=b2MassData()
self.__ComputeMass(m, density)
return m
# Register b2Shape in _Box2D:
_Box2D.b2Shape_swigregister(b2Shape)
class b2CircleShape(b2Shape):
r"""A circle shape."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2CircleShape_swiginit(self,_Box2D.new_b2CircleShape())
_init_kwargs(self, **kwargs)
pos = property(_Box2D.b2CircleShape_pos_get, _Box2D.b2CircleShape_pos_set, doc=r"""pos : b2Vec2""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2CircleShape___hash__)
def __repr__(self):
return _format_repr(self)
__swig_destroy__ = _Box2D.delete_b2CircleShape
# Register b2CircleShape in _Box2D:
_Box2D.b2CircleShape_swigregister(b2CircleShape)
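# Usage sketch for b2CircleShape (comments only). Being a concrete b2Shape, it
# picks up the pure-Python helpers defined on b2Shape above (getMass, getAABB,
# childCount); the mass figure assumes a uniform density:
#
#     circle = b2CircleShape(radius=0.5, pos=(1, 0))
#     md = circle.getMass(density=1.0)     # b2MassData with mass ~= pi * 0.5**2
#     # circle.getAABB(xf, 0) returns a b2AABB for a given b2Transform 'xf'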
class b2EdgeShape(b2Shape):
r"""A line segment (edge) shape. These can be connected in chains or loops to other edge shapes. The connectivity information is used to ensure correct contact normals."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2EdgeShape_swiginit(self,_Box2D.new_b2EdgeShape())
_init_kwargs(self, **kwargs)
__Set = _swig_new_instance_method(_Box2D.b2EdgeShape___Set)
vertex1 = property(_Box2D.b2EdgeShape_vertex1_get, _Box2D.b2EdgeShape_vertex1_set, doc=r"""vertex1 : b2Vec2""")
vertex2 = property(_Box2D.b2EdgeShape_vertex2_get, _Box2D.b2EdgeShape_vertex2_set, doc=r"""vertex2 : b2Vec2""")
vertex0 = property(_Box2D.b2EdgeShape_vertex0_get, _Box2D.b2EdgeShape_vertex0_set, doc=r"""vertex0 : b2Vec2""")
vertex3 = property(_Box2D.b2EdgeShape_vertex3_get, _Box2D.b2EdgeShape_vertex3_set, doc=r"""vertex3 : b2Vec2""")
hasVertex0 = property(_Box2D.b2EdgeShape_hasVertex0_get, _Box2D.b2EdgeShape_hasVertex0_set, doc=r"""hasVertex0 : bool""")
hasVertex3 = property(_Box2D.b2EdgeShape_hasVertex3_get, _Box2D.b2EdgeShape_hasVertex3_set, doc=r"""hasVertex3 : bool""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2EdgeShape___hash__)
def __repr__(self):
return _format_repr(self)
def __repr__(self):
return "b2EdgeShape(vertices: %s)" % (self.vertices)
@property
def all_vertices(self):
"""Returns all of the vertices as a list of tuples [ (x0,y0), (x1,y1), (x2,y2) (x3,y3) ]
Note that the validity of vertices 0 and 4 depend on whether or not
hasVertex0 and hasVertex3 are set.
"""
return [tuple(self.vertex0), tuple(self.vertex1), tuple(self.vertex2), tuple(self.vertex3)]
def __get_vertices(self):
"""Returns the basic vertices as a list of tuples [ (x1,y1), (x2,y2) ]
To include the supporting vertices, see 'all_vertices'
If you want to set vertex3 but not vertex0, pass in None for vertex0.
"""
return [tuple(self.vertex1), tuple(self.vertex2)]
def __set_vertices(self, vertices):
if len(vertices)==2:
self.vertex1, self.vertex2=vertices
self.hasVertex0=False
self.hasVertex3=False
elif len(vertices)==3:
self.vertex0, self.vertex1, self.vertex2=vertices
self.hasVertex0=(vertices[0] is not None)
self.hasVertex3=False
elif len(vertices)==4:
self.vertex0, self.vertex1, self.vertex2, self.vertex3=vertices
self.hasVertex0=(vertices[0] is not None)
self.hasVertex3=True
else:
raise ValueError('Expected from 2 to 4 vertices.')
@property
def vertexCount(self):
"""
Returns the number of valid vertices (i.e., it takes into account whether
hasVertex0 and hasVertex3 are set)
"""
if self.hasVertex0 and self.hasVertex3:
return 4
elif self.hasVertex0 or self.hasVertex3:
return 3
else:
return 2
def __iter__(self):
"""
Iterates over the vertices in the Edge
"""
for v in self.vertices:
yield v
vertices=property(__get_vertices, __set_vertices)
__swig_destroy__ = _Box2D.delete_b2EdgeShape
# Register b2EdgeShape in _Box2D:
_Box2D.b2EdgeShape_swigregister(b2EdgeShape)
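# Usage sketch for b2EdgeShape (comments only). The 'vertices' property accepts
# two core points, and optionally one or two extra "ghost" points that populate
# vertex0/vertex3 for smooth collision against adjacent edges:
#
#     edge = b2EdgeShape(vertices=[(0, 0), (1, 0)])              # plain segment
#     edge.vertices = [(-1, 0), (0, 0), (1, 0), (2, 0)]          # with both ghost vertices
#     assert edge.vertexCount == 4 and len(edge.vertices) == 2   # getter returns only the core segment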
class b2ChainShape(b2Shape):
r"""A loop shape is a free form sequence of line segments that form a circular list. The loop may cross upon itself, but this is not recommended for smooth collision. The loop has double sided collision, so you can use inside and outside collision. Therefore, you may use any winding order. Since there may be many vertices, they are allocated using b2Alloc."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2ChainShape_swiginit(self,_Box2D.new_b2ChainShape())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ChainShape
CreateLoop = _swig_new_instance_method(_Box2D.b2ChainShape_CreateLoop)
CreateChain = _swig_new_instance_method(_Box2D.b2ChainShape_CreateChain)
SetPrevVertex = _swig_new_instance_method(_Box2D.b2ChainShape_SetPrevVertex)
SetNextVertex = _swig_new_instance_method(_Box2D.b2ChainShape_SetNextVertex)
__GetChildEdge = _swig_new_instance_method(_Box2D.b2ChainShape___GetChildEdge)
m_prevVertex = property(_Box2D.b2ChainShape_m_prevVertex_get, _Box2D.b2ChainShape_m_prevVertex_set, doc=r"""m_prevVertex : b2Vec2""")
m_nextVertex = property(_Box2D.b2ChainShape_m_nextVertex_get, _Box2D.b2ChainShape_m_nextVertex_set, doc=r"""m_nextVertex : b2Vec2""")
m_hasPrevVertex = property(_Box2D.b2ChainShape_m_hasPrevVertex_get, _Box2D.b2ChainShape_m_hasPrevVertex_set, doc=r"""m_hasPrevVertex : bool""")
m_hasNextVertex = property(_Box2D.b2ChainShape_m_hasNextVertex_get, _Box2D.b2ChainShape_m_hasNextVertex_set, doc=r"""m_hasNextVertex : bool""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ChainShape___hash__)
def __repr__(self):
return _format_repr(self)
__get_vertices = _swig_new_instance_method(_Box2D.b2ChainShape___get_vertices)
def __repr__(self):
return "b2ChainShape(vertices: %s)" % (self.vertices)
def getChildEdge(self, index):
if index >= self.childCount:
raise ValueError('Child index should be less than childCount=%d' % self.childCount)
edge=b2EdgeShape()
self.__GetChildEdge(edge, index)
return edge
@property
def edges(self):
return [self.getChildEdge(i) for i in range(self.childCount)]
@property
def vertexCount(self):
return self.__get_count()
def __get_vertices(self):
"""Returns all of the vertices as a list of tuples [ (x1,y1), (x2,y2) ... (xN,yN) ]"""
return [ (self.__get_vertex(i).x, self.__get_vertex(i).y )
for i in range(0, self.vertexCount)]
def __iter__(self):
"""
Iterates over the vertices in the Chain
"""
for v in self.vertices:
yield v
def __set_vertices(self, values, loop=True):
if not values or not isinstance(values, (list, tuple)) or (len(values) < 2):
raise ValueError('Expected tuple or list of length >= 2.')
for i,value in enumerate(values):
if isinstance(value, (tuple, list)):
if len(value) != 2:
raise ValueError('Expected tuple or list of length 2, got length %d' % len(value))
for j in value:
if not isinstance(j, (int, float)):
raise ValueError('Expected int or float values, got %s' % (type(j)))
elif isinstance(value, b2Vec2):
pass
else:
raise ValueError('Expected tuple, list, or b2Vec2, got %s' % type(value))
vecs=_b2Vec2Array(len(values))
for i, value in enumerate(values):
if isinstance(value, b2Vec2):
vecs[i]=value
else:
vecs[i]=b2Vec2(value)
self.__create(vecs, len(values), loop)
vertices = property(__get_vertices, __set_vertices)
vertices_chain = property(__get_vertices, lambda self, v : self.__set_vertices(v, loop=False))
vertices_loop = vertices
__create = _swig_new_instance_method(_Box2D.b2ChainShape___create)
__get_vertex = _swig_new_instance_method(_Box2D.b2ChainShape___get_vertex)
__get_count = _swig_new_instance_method(_Box2D.b2ChainShape___get_count)
# Register b2ChainShape in _Box2D:
_Box2D.b2ChainShape_swigregister(b2ChainShape)
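# Usage sketch for b2ChainShape (comments only). Assigning 'vertices' (alias
# 'vertices_loop') builds a closed loop, while 'vertices_chain' builds an open
# chain; child edges can then be inspected via getChildEdge/edges:
#
#     loop = b2ChainShape()
#     loop.vertices = [(0, 0), (2, 0), (2, 2), (0, 2)]        # closed loop, 4 child edges
#     open_chain = b2ChainShape()
#     open_chain.vertices_chain = [(0, 0), (1, 0), (2, 1)]    # open chain, 2 child edges
#     first = loop.getChildEdge(0)                            # a b2EdgeShape copy of edge 0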
class b2PolygonShape(b2Shape):
r"""A convex polygon. It is assumed that the interior of the polygon is to the left of each edge. Polygons have a maximum number of vertices equal to b2_maxPolygonVertices. In most cases you should not need many vertices for a convex polygon."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2PolygonShape_swiginit(self,_Box2D.new_b2PolygonShape())
_init_kwargs(self, **kwargs)
SetAsBox = _swig_new_instance_method(_Box2D.b2PolygonShape_SetAsBox)
Validate = _swig_new_instance_method(_Box2D.b2PolygonShape_Validate)
centroid = property(_Box2D.b2PolygonShape_centroid_get, _Box2D.b2PolygonShape_centroid_set, doc=r"""centroid : b2Vec2""")
vertexCount = property(_Box2D.b2PolygonShape_vertexCount_get, _Box2D.b2PolygonShape_vertexCount_set, doc=r"""vertexCount : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2PolygonShape___hash__)
def __repr__(self):
return _format_repr(self)
__get_vertices = _swig_new_instance_method(_Box2D.b2PolygonShape___get_vertices)
__get_normals = _swig_new_instance_method(_Box2D.b2PolygonShape___get_normals)
def __repr__(self):
return "b2PolygonShape(vertices: %s)" % (self.vertices)
def __clear_vertices(self):
self.vertexCount=0
for i in range(0, b2_maxPolygonVertices):
self.set_vertex(i, 0, 0)
def __set_vertices(self, values):
if not values:
self.__clear_vertices()
else:
if len(values) < 2 or len(values) > b2_maxPolygonVertices:
raise ValueError('Expected tuple or list of length >= 2 and <= b2_maxPolygonVertices=%d, got length %d.' %
(b2_maxPolygonVertices, len(values)))
for i,value in enumerate(values):
if isinstance(value, (tuple, list, b2Vec2)):
if len(value) != 2:
raise ValueError('Expected tuple or list of length 2, got length %d' % len(value))
self.set_vertex(i, *value)
else:
raise ValueError('Expected tuple, list, or b2Vec2, got %s' % type(value))
self.vertexCount=i+1 # updated per vertex so vertexCount stays consistent if an exception interrupts the loop
self.__set_vertices_internal() # calculates normals, centroid, etc.
def __iter__(self):
"""
Iterates over the vertices in the polygon
"""
for v in self.vertices:
yield v
def __IsValid(self):
return b2CheckPolygon(self)
valid = property(__IsValid, None, doc="Checks the polygon to see if it can be properly created. Raises ValueError for invalid shapes.")
vertices = property(__get_vertices, __set_vertices, doc="All of the vertices as a list of tuples [ (x1,y1), (x2,y2) ... (xN,yN) ]")
normals = property(__get_normals, None, doc="All of the normals as a list of tuples [ (x1,y1), (x2,y2) ... (xN,yN) ]")
box = property(None, lambda self, value: self.SetAsBox(*value), doc="Property replacement for running SetAsBox (Write-only)")
__get_vertex = _swig_new_instance_method(_Box2D.b2PolygonShape___get_vertex)
__get_normal = _swig_new_instance_method(_Box2D.b2PolygonShape___get_normal)
set_vertex = _swig_new_instance_method(_Box2D.b2PolygonShape_set_vertex)
__set_vertices_internal = _swig_new_instance_method(_Box2D.b2PolygonShape___set_vertices_internal)
__swig_destroy__ = _Box2D.delete_b2PolygonShape
# Register b2PolygonShape in _Box2D:
_Box2D.b2PolygonShape_swigregister(b2PolygonShape)
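# Usage sketch for b2PolygonShape (comments only). 'box' is a write-only
# shortcut for SetAsBox (half-width, half-height), and assigning 'vertices'
# recomputes the normals and centroid internally:
#
#     square = b2PolygonShape(box=(0.5, 0.5))                   # a 1x1 box
#     tri = b2PolygonShape(vertices=[(0, 0), (1, 0), (0, 1)])   # counter-clockwise winding
#     assert tri.vertexCount == 3 and tri.valid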
b2_nullNode = _Box2D.b2_nullNode
class b2TreeNode(object):
r"""Proxy of C++ b2TreeNode class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
IsLeaf = _swig_new_instance_method(_Box2D.b2TreeNode_IsLeaf)
aabb = property(_Box2D.b2TreeNode_aabb_get, _Box2D.b2TreeNode_aabb_set, doc=r"""aabb : b2AABB""")
child1 = property(_Box2D.b2TreeNode_child1_get, _Box2D.b2TreeNode_child1_set, doc=r"""child1 : int32""")
child2 = property(_Box2D.b2TreeNode_child2_get, _Box2D.b2TreeNode_child2_set, doc=r"""child2 : int32""")
height = property(_Box2D.b2TreeNode_height_get, _Box2D.b2TreeNode_height_set, doc=r"""height : int32""")
def __init__(self):
r"""__init__(b2TreeNode self) -> b2TreeNode"""
_Box2D.b2TreeNode_swiginit(self, _Box2D.new_b2TreeNode())
__swig_destroy__ = _Box2D.delete_b2TreeNode
# Register b2TreeNode in _Box2D:
_Box2D.b2TreeNode_swigregister(b2TreeNode)
class b2Pair(object):
r"""Proxy of C++ b2Pair class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
proxyIdA = property(_Box2D.b2Pair_proxyIdA_get, _Box2D.b2Pair_proxyIdA_set, doc=r"""proxyIdA : int32""")
proxyIdB = property(_Box2D.b2Pair_proxyIdB_get, _Box2D.b2Pair_proxyIdB_set, doc=r"""proxyIdB : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Pair___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2Pair_swiginit(self,_Box2D.new_b2Pair())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2Pair
# Register b2Pair in _Box2D:
_Box2D.b2Pair_swigregister(b2Pair)
class b2BroadPhase(object):
r"""The broad-phase is used for computing pairs and performing volume queries and ray casts. This broad-phase does not persist pairs. Instead, this reports potentially new pairs. It is up to the client to consume the new pairs and to track subsequent overlap."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
e_nullProxy = _Box2D.b2BroadPhase_e_nullProxy
def __init__(self):
r"""
__init__(b2BroadPhase self) -> b2BroadPhase
The broad-phase is used for computing pairs and performing volume queries and ray casts. This broad-phase does not persist pairs. Instead, this reports potentially new pairs. It is up to the client to consume the new pairs and to track subsequent overlap.
"""
_Box2D.b2BroadPhase_swiginit(self, _Box2D.new_b2BroadPhase())
__swig_destroy__ = _Box2D.delete_b2BroadPhase
MoveProxy = _swig_new_instance_method(_Box2D.b2BroadPhase_MoveProxy)
TouchProxy = _swig_new_instance_method(_Box2D.b2BroadPhase_TouchProxy)
GetFatAABB = _swig_new_instance_method(_Box2D.b2BroadPhase_GetFatAABB)
TestOverlap = _swig_new_instance_method(_Box2D.b2BroadPhase_TestOverlap)
__GetProxyCount = _swig_new_instance_method(_Box2D.b2BroadPhase___GetProxyCount)
__GetTreeHeight = _swig_new_instance_method(_Box2D.b2BroadPhase___GetTreeHeight)
__GetTreeBalance = _swig_new_instance_method(_Box2D.b2BroadPhase___GetTreeBalance)
__GetTreeQuality = _swig_new_instance_method(_Box2D.b2BroadPhase___GetTreeQuality)
ShiftOrigin = _swig_new_instance_method(_Box2D.b2BroadPhase_ShiftOrigin)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2BroadPhase___hash__)
def __repr__(self):
return _format_repr(self)
proxyCount=property(__GetProxyCount, None)
treeHeight=property(__GetTreeHeight, None)
treeBalance=property(__GetTreeBalance, None)
treeQuality=property(__GetTreeQuality, None)
# Register b2BroadPhase in _Box2D:
_Box2D.b2BroadPhase_swigregister(b2BroadPhase)
b2PairLessThan = _Box2D.b2PairLessThan
class b2DistanceProxy(object):
r"""A distance proxy is used by the GJK algorithm. It encapsulates any shape."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, shape, index=0):
_Box2D.b2DistanceProxy_swiginit(self,_Box2D.new_b2DistanceProxy())
self.Set(shape, index)
Set = _swig_new_instance_method(_Box2D.b2DistanceProxy_Set)
GetSupport = _swig_new_instance_method(_Box2D.b2DistanceProxy_GetSupport)
GetSupportVertex = _swig_new_instance_method(_Box2D.b2DistanceProxy_GetSupportVertex)
__get_vertex_count = _swig_new_instance_method(_Box2D.b2DistanceProxy___get_vertex_count)
__get_vertex = _swig_new_instance_method(_Box2D.b2DistanceProxy___get_vertex)
m_buffer = property(_Box2D.b2DistanceProxy_m_buffer_get, _Box2D.b2DistanceProxy_m_buffer_set, doc=r"""m_buffer : a(2).b2Vec2""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DistanceProxy___hash__)
def __repr__(self):
return _format_repr(self)
def __get_vertices(self):
"""Returns all of the vertices as a list of tuples [ (x1,y1), (x2,y2) ... (xN,yN) ]"""
return [ (self.__get_vertex(i).x, self.__get_vertex(i).y )
for i in range(0, self.__get_vertex_count())]
vertices = property(__get_vertices, None)
__swig_destroy__ = _Box2D.delete_b2DistanceProxy
# Register b2DistanceProxy in _Box2D:
_Box2D.b2DistanceProxy_swigregister(b2DistanceProxy)
class b2DistanceInput(object):
r"""Input for b2Distance. You have to option to use the shape radii in the computation. Even"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
proxyA = property(_Box2D.b2DistanceInput_proxyA_get, _Box2D.b2DistanceInput_proxyA_set, doc=r"""proxyA : b2DistanceProxy""")
proxyB = property(_Box2D.b2DistanceInput_proxyB_get, _Box2D.b2DistanceInput_proxyB_set, doc=r"""proxyB : b2DistanceProxy""")
transformA = property(_Box2D.b2DistanceInput_transformA_get, _Box2D.b2DistanceInput_transformA_set, doc=r"""transformA : b2Transform""")
transformB = property(_Box2D.b2DistanceInput_transformB_get, _Box2D.b2DistanceInput_transformB_set, doc=r"""transformB : b2Transform""")
useRadii = property(_Box2D.b2DistanceInput_useRadii_get, _Box2D.b2DistanceInput_useRadii_set, doc=r"""useRadii : bool""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DistanceInput___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2DistanceInput_swiginit(self,_Box2D.new_b2DistanceInput())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2DistanceInput
# Register b2DistanceInput in _Box2D:
_Box2D.b2DistanceInput_swigregister(b2DistanceInput)
class b2DistanceOutput(object):
r"""Output for b2Distance."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
pointA = property(_Box2D.b2DistanceOutput_pointA_get, _Box2D.b2DistanceOutput_pointA_set, doc=r"""pointA : b2Vec2""")
pointB = property(_Box2D.b2DistanceOutput_pointB_get, _Box2D.b2DistanceOutput_pointB_set, doc=r"""pointB : b2Vec2""")
distance = property(_Box2D.b2DistanceOutput_distance_get, _Box2D.b2DistanceOutput_distance_set, doc=r"""distance : float32""")
iterations = property(_Box2D.b2DistanceOutput_iterations_get, _Box2D.b2DistanceOutput_iterations_set, doc=r"""iterations : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DistanceOutput___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2DistanceOutput_swiginit(self,_Box2D.new_b2DistanceOutput())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2DistanceOutput
# Register b2DistanceOutput in _Box2D:
_Box2D.b2DistanceOutput_swigregister(b2DistanceOutput)
class b2TOIInput(object):
r"""Input parameters for b2TimeOfImpact."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
proxyA = property(_Box2D.b2TOIInput_proxyA_get, _Box2D.b2TOIInput_proxyA_set, doc=r"""proxyA : b2DistanceProxy""")
proxyB = property(_Box2D.b2TOIInput_proxyB_get, _Box2D.b2TOIInput_proxyB_set, doc=r"""proxyB : b2DistanceProxy""")
sweepA = property(_Box2D.b2TOIInput_sweepA_get, _Box2D.b2TOIInput_sweepA_set, doc=r"""sweepA : b2Sweep""")
sweepB = property(_Box2D.b2TOIInput_sweepB_get, _Box2D.b2TOIInput_sweepB_set, doc=r"""sweepB : b2Sweep""")
tMax = property(_Box2D.b2TOIInput_tMax_get, _Box2D.b2TOIInput_tMax_set, doc=r"""tMax : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2TOIInput___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2TOIInput_swiginit(self,_Box2D.new_b2TOIInput())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2TOIInput
# Register b2TOIInput in _Box2D:
_Box2D.b2TOIInput_swigregister(b2TOIInput)
class b2TOIOutput(object):
r"""Proxy of C++ b2TOIOutput class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
e_unknown = _Box2D.b2TOIOutput_e_unknown
e_failed = _Box2D.b2TOIOutput_e_failed
e_overlapped = _Box2D.b2TOIOutput_e_overlapped
e_touching = _Box2D.b2TOIOutput_e_touching
e_separated = _Box2D.b2TOIOutput_e_separated
state = property(_Box2D.b2TOIOutput_state_get, _Box2D.b2TOIOutput_state_set, doc=r"""state : b2TOIOutput::State""")
t = property(_Box2D.b2TOIOutput_t_get, _Box2D.b2TOIOutput_t_set, doc=r"""t : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2TOIOutput___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self):
r"""__init__(b2TOIOutput self) -> b2TOIOutput"""
_Box2D.b2TOIOutput_swiginit(self, _Box2D.new_b2TOIOutput())
__swig_destroy__ = _Box2D.delete_b2TOIOutput
# Register b2TOIOutput in _Box2D:
_Box2D.b2TOIOutput_swigregister(b2TOIOutput)
b2_staticBody = _Box2D.b2_staticBody
b2_kinematicBody = _Box2D.b2_kinematicBody
b2_dynamicBody = _Box2D.b2_dynamicBody
class b2BodyDef(object):
r"""A body definition holds all the data needed to construct a rigid body. You can safely re-use body definitions. Shapes are added to a body after construction."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2BodyDef_swiginit(self,_Box2D.new_b2BodyDef())
_init_kwargs(self, **kwargs)
type = property(_Box2D.b2BodyDef_type_get, _Box2D.b2BodyDef_type_set, doc=r"""type : b2BodyType""")
position = property(_Box2D.b2BodyDef_position_get, _Box2D.b2BodyDef_position_set, doc=r"""position : b2Vec2""")
angle = property(_Box2D.b2BodyDef_angle_get, _Box2D.b2BodyDef_angle_set, doc=r"""angle : float32""")
linearVelocity = property(_Box2D.b2BodyDef_linearVelocity_get, _Box2D.b2BodyDef_linearVelocity_set, doc=r"""linearVelocity : b2Vec2""")
angularVelocity = property(_Box2D.b2BodyDef_angularVelocity_get, _Box2D.b2BodyDef_angularVelocity_set, doc=r"""angularVelocity : float32""")
linearDamping = property(_Box2D.b2BodyDef_linearDamping_get, _Box2D.b2BodyDef_linearDamping_set, doc=r"""linearDamping : float32""")
angularDamping = property(_Box2D.b2BodyDef_angularDamping_get, _Box2D.b2BodyDef_angularDamping_set, doc=r"""angularDamping : float32""")
allowSleep = property(_Box2D.b2BodyDef_allowSleep_get, _Box2D.b2BodyDef_allowSleep_set, doc=r"""allowSleep : bool""")
awake = property(_Box2D.b2BodyDef_awake_get, _Box2D.b2BodyDef_awake_set, doc=r"""awake : bool""")
fixedRotation = property(_Box2D.b2BodyDef_fixedRotation_get, _Box2D.b2BodyDef_fixedRotation_set, doc=r"""fixedRotation : bool""")
bullet = property(_Box2D.b2BodyDef_bullet_get, _Box2D.b2BodyDef_bullet_set, doc=r"""bullet : bool""")
active = property(_Box2D.b2BodyDef_active_get, _Box2D.b2BodyDef_active_set, doc=r"""active : bool""")
gravityScale = property(_Box2D.b2BodyDef_gravityScale_get, _Box2D.b2BodyDef_gravityScale_set, doc=r"""gravityScale : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2BodyDef___hash__)
def __repr__(self):
return _format_repr(self)
__GetUserData = _swig_new_instance_method(_Box2D.b2BodyDef___GetUserData)
__SetUserData = _swig_new_instance_method(_Box2D.b2BodyDef___SetUserData)
ClearUserData = _swig_new_instance_method(_Box2D.b2BodyDef_ClearUserData)
userData = property(__GetUserData, __SetUserData)
def __del__(self):
self.ClearUserData()
_fixtures = None
_shapes = None
_shapeFixture = None
@property
def fixtures(self):
return self._fixtures
@fixtures.setter
def fixtures(self, fixtures):
if isinstance(fixtures, b2FixtureDef):
self._fixtures = [fixtures]
else:
self._fixtures = list(fixtures)
@property
def shapes(self):
return self._shapes
@shapes.setter
def shapes(self, shapes):
if isinstance(shapes, b2Shape):
self._shapes = [shapes]
else:
self._shapes = list(shapes)
@property
def shapeFixture(self):
return self._shapeFixture
@shapeFixture.setter
def shapeFixture(self, fixture):
self._shapeFixture = fixture
__swig_destroy__ = _Box2D.delete_b2BodyDef
# Register b2BodyDef in _Box2D:
_Box2D.b2BodyDef_swigregister(b2BodyDef)
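# Usage sketch for b2BodyDef (comments only). The definition is plain data; the
# pure-Python 'fixtures'/'shapes'/'shapeFixture' attributes above are consumed
# when the definition is handed to b2World.CreateBody (the world API is defined
# later in this module and is assumed here):
#
#     bd = b2BodyDef(type=b2_dynamicBody, position=(0, 10), fixedRotation=True)
#     bd.shapes = b2CircleShape(radius=0.5)    # a single shape is wrapped into a list
#     # body = world.CreateBody(bd)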
class b2Body(object):
r"""A rigid body. These are created via b2World::CreateBody."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__SetTransform = _swig_new_instance_method(_Box2D.b2Body___SetTransform)
__GetTransform = _swig_new_instance_method(_Box2D.b2Body___GetTransform)
__GetPosition = _swig_new_instance_method(_Box2D.b2Body___GetPosition)
__GetAngle = _swig_new_instance_method(_Box2D.b2Body___GetAngle)
__GetWorldCenter = _swig_new_instance_method(_Box2D.b2Body___GetWorldCenter)
__GetLocalCenter = _swig_new_instance_method(_Box2D.b2Body___GetLocalCenter)
__SetLinearVelocity = _swig_new_instance_method(_Box2D.b2Body___SetLinearVelocity)
__GetLinearVelocity = _swig_new_instance_method(_Box2D.b2Body___GetLinearVelocity)
__SetAngularVelocity = _swig_new_instance_method(_Box2D.b2Body___SetAngularVelocity)
__GetAngularVelocity = _swig_new_instance_method(_Box2D.b2Body___GetAngularVelocity)
ApplyForce = _swig_new_instance_method(_Box2D.b2Body_ApplyForce)
ApplyForceToCenter = _swig_new_instance_method(_Box2D.b2Body_ApplyForceToCenter)
ApplyTorque = _swig_new_instance_method(_Box2D.b2Body_ApplyTorque)
ApplyLinearImpulse = _swig_new_instance_method(_Box2D.b2Body_ApplyLinearImpulse)
ApplyAngularImpulse = _swig_new_instance_method(_Box2D.b2Body_ApplyAngularImpulse)
__GetMass = _swig_new_instance_method(_Box2D.b2Body___GetMass)
__GetInertia = _swig_new_instance_method(_Box2D.b2Body___GetInertia)
GetMassData = _swig_new_instance_method(_Box2D.b2Body_GetMassData)
__SetMassData = _swig_new_instance_method(_Box2D.b2Body___SetMassData)
ResetMassData = _swig_new_instance_method(_Box2D.b2Body_ResetMassData)
GetWorldPoint = _swig_new_instance_method(_Box2D.b2Body_GetWorldPoint)
GetWorldVector = _swig_new_instance_method(_Box2D.b2Body_GetWorldVector)
GetLocalPoint = _swig_new_instance_method(_Box2D.b2Body_GetLocalPoint)
GetLocalVector = _swig_new_instance_method(_Box2D.b2Body_GetLocalVector)
GetLinearVelocityFromWorldPoint = _swig_new_instance_method(_Box2D.b2Body_GetLinearVelocityFromWorldPoint)
GetLinearVelocityFromLocalPoint = _swig_new_instance_method(_Box2D.b2Body_GetLinearVelocityFromLocalPoint)
__GetLinearDamping = _swig_new_instance_method(_Box2D.b2Body___GetLinearDamping)
__SetLinearDamping = _swig_new_instance_method(_Box2D.b2Body___SetLinearDamping)
__GetAngularDamping = _swig_new_instance_method(_Box2D.b2Body___GetAngularDamping)
__SetAngularDamping = _swig_new_instance_method(_Box2D.b2Body___SetAngularDamping)
__GetGravityScale = _swig_new_instance_method(_Box2D.b2Body___GetGravityScale)
__SetGravityScale = _swig_new_instance_method(_Box2D.b2Body___SetGravityScale)
__SetType = _swig_new_instance_method(_Box2D.b2Body___SetType)
__GetType = _swig_new_instance_method(_Box2D.b2Body___GetType)
__SetBullet = _swig_new_instance_method(_Box2D.b2Body___SetBullet)
__IsBullet = _swig_new_instance_method(_Box2D.b2Body___IsBullet)
__SetSleepingAllowed = _swig_new_instance_method(_Box2D.b2Body___SetSleepingAllowed)
__IsSleepingAllowed = _swig_new_instance_method(_Box2D.b2Body___IsSleepingAllowed)
__SetAwake = _swig_new_instance_method(_Box2D.b2Body___SetAwake)
__IsAwake = _swig_new_instance_method(_Box2D.b2Body___IsAwake)
__SetActive = _swig_new_instance_method(_Box2D.b2Body___SetActive)
__IsActive = _swig_new_instance_method(_Box2D.b2Body___IsActive)
__SetFixedRotation = _swig_new_instance_method(_Box2D.b2Body___SetFixedRotation)
__IsFixedRotation = _swig_new_instance_method(_Box2D.b2Body___IsFixedRotation)
__GetFixtureList_internal = _swig_new_instance_method(_Box2D.b2Body___GetFixtureList_internal)
__GetJointList_internal = _swig_new_instance_method(_Box2D.b2Body___GetJointList_internal)
__GetContactList_internal = _swig_new_instance_method(_Box2D.b2Body___GetContactList_internal)
__GetNext = _swig_new_instance_method(_Box2D.b2Body___GetNext)
__GetWorld = _swig_new_instance_method(_Box2D.b2Body___GetWorld)
Dump = _swig_new_instance_method(_Box2D.b2Body_Dump)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Body___hash__)
def __repr__(self):
return _format_repr(self)
DestroyFixture = _swig_new_instance_method(_Box2D.b2Body_DestroyFixture)
__CreateFixture = _swig_new_instance_method(_Box2D.b2Body___CreateFixture)
__GetUserData = _swig_new_instance_method(_Box2D.b2Body___GetUserData)
__SetUserData = _swig_new_instance_method(_Box2D.b2Body___SetUserData)
ClearUserData = _swig_new_instance_method(_Box2D.b2Body_ClearUserData)
userData = property(__GetUserData, __SetUserData)
__eq__ = b2BodyCompare
__ne__ = lambda self,other: not b2BodyCompare(self,other)
def __GetMassData(self):
"""
Get a b2MassData object that represents this b2Body
NOTE: To just get the mass, use body.mass
"""
ret = b2MassData()
ret.center=self.localCenter
ret.I = self.inertia
ret.mass = self.mass
return ret
def __SetInertia(self, inertia):
"""
Set the body's inertia
"""
md = self.massData
md.I = inertia
self.massData=md
def __SetMass(self, mass):
"""
Set the body's mass
"""
md = self.massData
md.mass = mass
self.massData=md
def __SetLocalCenter(self, lcenter):
"""
Set the body's local center
"""
md = self.massData
md.center = lcenter
self.massData=md
def __iter__(self):
"""
Iterates over the fixtures in the body
"""
for fixture in self.fixtures:
yield fixture
def __CreateShapeFixture(self, type_, **kwargs):
"""
Internal function to handle creating circles, polygons, etc.
without first creating a fixture. type_ is b2Shape.
"""
shape=type_()
fixture=b2FixtureDef(shape=shape)
for key, value in kwargs.items():
# Note that these hasattrs use the types to get around
# the fact that some properties are write-only (like 'box' in
# polygon shapes), and as such do not show up with 'hasattr'.
if hasattr(type_, key):
to_set=shape
elif hasattr(b2FixtureDef, key):
to_set=fixture
else:
raise AttributeError('Property %s not found in either %s or b2FixtureDef' % (key, type_.__name__))
try:
setattr(to_set, key, value)
except Exception as ex:
raise ex.__class__('Failed on kwargs, class="%s" key="%s": %s' \
% (to_set.__class__.__name__, key, ex))
return self.CreateFixture(fixture)
def CreatePolygonFixture(self, **kwargs):
"""
Create a polygon shape without an explicit fixture definition.
Takes kwargs; you can pass in properties for either the polygon
or the fixture to this function. For example:
CreatePolygonFixture(box=(1, 1), friction=0.2, density=1.0)
where 'box' is a property from the polygon shape, and
'friction' and 'density' are from the fixture definition.
"""
return self.__CreateShapeFixture(b2PolygonShape, **kwargs)
def CreateCircleFixture(self, **kwargs):
"""
Create a circle shape without an explicit fixture definition.
Takes kwargs; you can pass in properties for either the circle
or the fixture to this function. For example:
CreateCircleFixture(radius=0.2, friction=0.2, density=1.0)
where 'radius' is a property from the circle shape, and
'friction' and 'density' are from the fixture definition.
"""
return self.__CreateShapeFixture(b2CircleShape, **kwargs)
def CreateEdgeFixture(self, **kwargs):
"""
Create a edge shape without an explicit fixture definition.
Takes kwargs; you can pass in properties for either the edge
or the fixture to this function. For example:
CreateEdgeFixture(vertices=[(0,0),(1,0)], friction=0.2, density=1.0)
where 'vertices' is a property from the edge shape, and
'friction' and 'density' are from the fixture definition.
"""
return self.__CreateShapeFixture(b2EdgeShape, **kwargs)
def CreateLoopFixture(self, **kwargs):
"""
Create a loop shape without an explicit fixture definition.
Takes kwargs; you can pass in properties for either the loop
or the fixture to this function. For example:
CreateLoopFixture(vertices=[...], friction=0.2, density=1.0)
where 'vertices' is a property from the loop shape, and
'friction' and 'density' are from the fixture definition.
"""
return self.__CreateShapeFixture(b2ChainShape, **kwargs)
CreateChainFixture = CreateLoopFixture
def CreateFixturesFromShapes(self, shapes=None, shapeFixture=None):
"""
Create fixture(s) on the body from one or more shapes, and optionally a single
fixture definition.
Takes kwargs; examples of valid combinations are as follows:
CreateFixturesFromShapes(shapes=b2CircleShape(radius=0.2))
CreateFixturesFromShapes(shapes=b2CircleShape(radius=0.2), shapeFixture=b2FixtureDef(friction=0.2))
CreateFixturesFromShapes(shapes=[b2CircleShape(radius=0.2), b2PolygonShape(box=[1,2])])
"""
if shapes is None:
raise TypeError('At least one shape required')
if shapeFixture is None:
shapeFixture=b2FixtureDef()
oldShape=None
else:
oldShape = shapeFixture.shape
ret=None
try:
if isinstance(shapes, (list, tuple)):
ret = []
for shape in shapes:
shapeFixture.shape = shape
ret.append(self.__CreateFixture(shapeFixture))
else:
shapeFixture.shape=shapes
ret = self.__CreateFixture(shapeFixture)
finally:
shapeFixture.shape=oldShape
return ret
def CreateFixture(self, defn=None, **kwargs):
"""
Create a fixture on the body.
Takes kwargs; examples of valid combinations are as follows:
CreateFixture(b2FixtureDef(shape=s, restitution=0.2, ...))
CreateFixture(shape=s, restitution=0.2, ...)
"""
if defn is not None and isinstance(defn, b2FixtureDef):
return self.__CreateFixture(defn)
else:
if 'shape' not in kwargs:
raise ValueError('Must specify the shape for the fixture')
return self.__CreateFixture(b2FixtureDef(**kwargs))
def CreateEdgeChain(self, edge_list):
"""
Creates a set of connected edge-shape fixtures on the body.
Expects edge_list to be a list of vertices, length >= 2.
"""
prev=None
if len(edge_list) < 2:
raise ValueError('Edge list must have length >= 2')
shape=b2EdgeShape(vertices=[list(i) for i in edge_list[0:2]])
self.CreateFixturesFromShapes(shape)
prev = edge_list[1]
for edge in edge_list[2:]:  # the edge between vertices 0 and 1 was created above
if len(edge) != 2:
raise ValueError('Vertex length != 2, "%s"' % list(edge))
shape.vertices = [list(prev), list(edge)]
self.CreateFixturesFromShapes(shape)
prev=edge
# Read-write properties
sleepingAllowed = property(__IsSleepingAllowed, __SetSleepingAllowed)
angularVelocity = property(__GetAngularVelocity, __SetAngularVelocity)
linearVelocity = property(__GetLinearVelocity, __SetLinearVelocity)
awake = property(__IsAwake, __SetAwake)
angularDamping = property(__GetAngularDamping, __SetAngularDamping)
fixedRotation = property(__IsFixedRotation, __SetFixedRotation)
linearDamping = property(__GetLinearDamping, __SetLinearDamping)
bullet = property(__IsBullet, __SetBullet)
type = property(__GetType, __SetType)
active = property(__IsActive, __SetActive)
angle = property(__GetAngle, lambda self, angle: self.__SetTransform(self.position, angle))
transform = property(__GetTransform, lambda self, value: self.__SetTransform(*value))
massData = property(__GetMassData, __SetMassData)
mass = property(__GetMass, __SetMass)
localCenter = property(__GetLocalCenter, __SetLocalCenter)
inertia = property(__GetInertia, __SetInertia)
position = property(__GetPosition, lambda self, pos: self.__SetTransform(pos, self.angle))
gravityScale = property(__GetGravityScale, __SetGravityScale)
# Read-only
joints = property(lambda self: _list_from_linked_list(self.__GetJointList_internal()), None,
doc="""All joints connected to the body as a list.
NOTE: This re-creates the list on every call. See also joints_gen.""")
contacts = property(lambda self: _list_from_linked_list(self.__GetContactList_internal()), None,
doc="""All contacts related to the body as a list.
NOTE: This re-creates the list on every call. See also contacts_gen.""")
fixtures = property(lambda self: _list_from_linked_list(self.__GetFixtureList_internal()), None,
doc="""All fixtures contained in this body as a list.
NOTE: This re-creates the list on every call. See also fixtures_gen.""")
joints_gen = property(lambda self: _indexable_generator(_generator_from_linked_list(self.__GetJointList_internal())), None,
doc="""Indexable generator of the connected joints to this body.
NOTE: When not using the whole list, this may be preferable to using 'joints'.""")
contacts_gen = property(lambda self: _indexable_generator(_generator_from_linked_list(self.__GetContactList_internal())), None,
doc="""Indexable generator of the related contacts.
NOTE: When not using the whole list, this may be preferable to using 'contacts'.""")
fixtures_gen = property(lambda self: _indexable_generator(_generator_from_linked_list(self.__GetFixtureList_internal())), None,
doc="""Indexable generator of the contained fixtures.
NOTE: When not using the whole list, this may be preferable to using 'fixtures'.""")
next = property(__GetNext, None)
worldCenter = property(__GetWorldCenter, None)
world = property(__GetWorld, None)
# Register b2Body in _Box2D:
_Box2D.b2Body_swigregister(b2Body)
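# Usage sketch for b2Body (comments only; 'body' is assumed to come from
# b2World.CreateBody, since bodies cannot be constructed directly). The
# Create*Fixture helpers above accept mixed shape and fixture-definition
# keyword arguments:
#
#     # body.CreatePolygonFixture(box=(1, 0.5), density=1.0, friction=0.3)
#     # body.CreateCircleFixture(radius=0.25, restitution=0.8)
#     # body.CreateEdgeChain([(-5, 0), (0, 0), (5, 1)])   # two connected edge fixtures
#     # body.awake, body.mass, body.position               # read/write convenience properties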
class b2Filter(object):
r"""This holds contact filtering data."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2Filter_swiginit(self,_Box2D.new_b2Filter())
_init_kwargs(self, **kwargs)
categoryBits = property(_Box2D.b2Filter_categoryBits_get, _Box2D.b2Filter_categoryBits_set, doc=r"""categoryBits : uint16""")
maskBits = property(_Box2D.b2Filter_maskBits_get, _Box2D.b2Filter_maskBits_set, doc=r"""maskBits : uint16""")
groupIndex = property(_Box2D.b2Filter_groupIndex_get, _Box2D.b2Filter_groupIndex_set, doc=r"""groupIndex : int16""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Filter___hash__)
def __repr__(self):
return _format_repr(self)
__swig_destroy__ = _Box2D.delete_b2Filter
# Register b2Filter in _Box2D:
_Box2D.b2Filter_swigregister(b2Filter)
class b2FixtureDef(object):
r"""A fixture definition is used to create a fixture. This class defines an abstract fixture definition. You can reuse fixture definitions safely."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2FixtureDef_swiginit(self,_Box2D.new_b2FixtureDef())
_init_kwargs(self, **kwargs)
shape = property(_Box2D.b2FixtureDef_shape_get, _Box2D.b2FixtureDef_shape_set, doc=r"""shape : p.q(const).b2Shape""")
friction = property(_Box2D.b2FixtureDef_friction_get, _Box2D.b2FixtureDef_friction_set, doc=r"""friction : float32""")
restitution = property(_Box2D.b2FixtureDef_restitution_get, _Box2D.b2FixtureDef_restitution_set, doc=r"""restitution : float32""")
density = property(_Box2D.b2FixtureDef_density_get, _Box2D.b2FixtureDef_density_set, doc=r"""density : float32""")
isSensor = property(_Box2D.b2FixtureDef_isSensor_get, _Box2D.b2FixtureDef_isSensor_set, doc=r"""isSensor : bool""")
filter = property(_Box2D.b2FixtureDef_filter_get, _Box2D.b2FixtureDef_filter_set, doc=r"""filter : b2Filter""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2FixtureDef___hash__)
def __repr__(self):
return _format_repr(self)
__GetUserData = _swig_new_instance_method(_Box2D.b2FixtureDef___GetUserData)
__SetUserData = _swig_new_instance_method(_Box2D.b2FixtureDef___SetUserData)
ClearUserData = _swig_new_instance_method(_Box2D.b2FixtureDef_ClearUserData)
userData = property(__GetUserData, __SetUserData)
def __del__(self):
self.ClearUserData()
def __SetCategoryBits(self, value):
self.filter.categoryBits=value
def __SetGroupIndex(self, value):
self.filter.groupIndex=value
def __SetMaskBits(self, value):
self.filter.maskBits=value
categoryBits=property(lambda self: self.filter.categoryBits, __SetCategoryBits)
groupIndex=property(lambda self: self.filter.groupIndex, __SetGroupIndex)
maskBits=property(lambda self: self.filter.maskBits, __SetMaskBits)
__swig_destroy__ = _Box2D.delete_b2FixtureDef
# Register b2FixtureDef in _Box2D:
_Box2D.b2FixtureDef_swigregister(b2FixtureDef)
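# Usage sketch for b2FixtureDef (comments only). The categoryBits/maskBits/
# groupIndex shortcuts above forward to the embedded b2Filter, so collision
# filtering can be configured directly on the definition:
#
#     fd = b2FixtureDef(shape=b2CircleShape(radius=0.3), density=1.0,
#                       categoryBits=0x0002, maskBits=0x0001)
#     assert fd.filter.categoryBits == 0x0002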
class b2FixtureProxy(object):
r"""This proxy is used internally to connect fixtures to the broad-phase."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
aabb = property(_Box2D.b2FixtureProxy_aabb_get, _Box2D.b2FixtureProxy_aabb_set, doc=r"""aabb : b2AABB""")
fixture = property(_Box2D.b2FixtureProxy_fixture_get, _Box2D.b2FixtureProxy_fixture_set, doc=r"""fixture : p.b2Fixture""")
childIndex = property(_Box2D.b2FixtureProxy_childIndex_get, _Box2D.b2FixtureProxy_childIndex_set, doc=r"""childIndex : int32""")
proxyId = property(_Box2D.b2FixtureProxy_proxyId_get, _Box2D.b2FixtureProxy_proxyId_set, doc=r"""proxyId : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2FixtureProxy___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self):
r"""
__init__(b2FixtureProxy self) -> b2FixtureProxy
This proxy is used internally to connect fixtures to the broad-phase.
"""
_Box2D.b2FixtureProxy_swiginit(self, _Box2D.new_b2FixtureProxy())
__swig_destroy__ = _Box2D.delete_b2FixtureProxy
# Register b2FixtureProxy in _Box2D:
_Box2D.b2FixtureProxy_swigregister(b2FixtureProxy)
class b2Fixture(object):
r"""
A fixture is used to attach a shape to a body for collision detection. A fixture inherits its transform from its parent. Fixtures hold additional non-geometric data such as friction, collision filters, etc. Fixtures are created via b2Body::CreateFixture.
WARNING:
you cannot reuse fixtures.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__GetType = _swig_new_instance_method(_Box2D.b2Fixture___GetType)
__GetShape = _swig_new_instance_method(_Box2D.b2Fixture___GetShape)
__SetSensor = _swig_new_instance_method(_Box2D.b2Fixture___SetSensor)
__IsSensor = _swig_new_instance_method(_Box2D.b2Fixture___IsSensor)
__SetFilterData = _swig_new_instance_method(_Box2D.b2Fixture___SetFilterData)
__GetFilterData = _swig_new_instance_method(_Box2D.b2Fixture___GetFilterData)
Refilter = _swig_new_instance_method(_Box2D.b2Fixture_Refilter)
__GetBody = _swig_new_instance_method(_Box2D.b2Fixture___GetBody)
__GetNext = _swig_new_instance_method(_Box2D.b2Fixture___GetNext)
TestPoint = _swig_new_instance_method(_Box2D.b2Fixture_TestPoint)
RayCast = _swig_new_instance_method(_Box2D.b2Fixture_RayCast)
__GetMassData = _swig_new_instance_method(_Box2D.b2Fixture___GetMassData)
__SetDensity = _swig_new_instance_method(_Box2D.b2Fixture___SetDensity)
__GetDensity = _swig_new_instance_method(_Box2D.b2Fixture___GetDensity)
__GetFriction = _swig_new_instance_method(_Box2D.b2Fixture___GetFriction)
__SetFriction = _swig_new_instance_method(_Box2D.b2Fixture___SetFriction)
__GetRestitution = _swig_new_instance_method(_Box2D.b2Fixture___GetRestitution)
__SetRestitution = _swig_new_instance_method(_Box2D.b2Fixture___SetRestitution)
GetAABB = _swig_new_instance_method(_Box2D.b2Fixture_GetAABB)
Dump = _swig_new_instance_method(_Box2D.b2Fixture_Dump)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Fixture___hash__)
def __repr__(self):
return _format_repr(self)
__GetUserData = _swig_new_instance_method(_Box2D.b2Fixture___GetUserData)
__SetUserData = _swig_new_instance_method(_Box2D.b2Fixture___SetUserData)
ClearUserData = _swig_new_instance_method(_Box2D.b2Fixture_ClearUserData)
userData = property(__GetUserData, __SetUserData)
__swig_destroy__ = _Box2D.delete_b2Fixture
__eq__ = b2FixtureCompare
__ne__ = lambda self,other: not b2FixtureCompare(self,other)
# Read-write properties
friction = property(__GetFriction, __SetFriction)
restitution = property(__GetRestitution, __SetRestitution)
filterData = property(__GetFilterData, __SetFilterData)
sensor = property(__IsSensor, __SetSensor)
density = property(__GetDensity, __SetDensity)
# Read-only
next = property(__GetNext, None)
type = property(__GetType, None)
shape = property(__GetShape, None)
body = property(__GetBody, None)
@property
def massData(self):
md=b2MassData()
self.__GetMassData(md)
return md
# Register b2Fixture in _Box2D:
_Box2D.b2Fixture_swigregister(b2Fixture)
class b2DestructionListener(object):
r"""Joints and fixtures are destroyed when their associated body is destroyed. Implement this listener so that you may nullify references to these joints and shapes."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__swig_destroy__ = _Box2D.delete_b2DestructionListener
SayGoodbye = _swig_new_instance_method(_Box2D.b2DestructionListener_SayGoodbye)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DestructionListener___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
if self.__class__ == b2DestructionListener:
_self = None
else:
_self = self
_Box2D.b2DestructionListener_swiginit(self,_Box2D.new_b2DestructionListener(_self, ))
_init_kwargs(self, **kwargs)
def __disown__(self):
self.this.disown()
_Box2D.disown_b2DestructionListener(self)
return weakref.proxy(self)
# Register b2DestructionListener in _Box2D:
_Box2D.b2DestructionListener_swigregister(b2DestructionListener)
class b2ContactFilter(object):
r"""Implement this class to provide collision filtering. In other words, you can implement this class if you want finer control over contact creation."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__swig_destroy__ = _Box2D.delete_b2ContactFilter
ShouldCollide = _swig_new_instance_method(_Box2D.b2ContactFilter_ShouldCollide)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactFilter___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
if self.__class__ == b2ContactFilter:
_self = None
else:
_self = self
_Box2D.b2ContactFilter_swiginit(self,_Box2D.new_b2ContactFilter(_self, ))
_init_kwargs(self, **kwargs)
def __disown__(self):
self.this.disown()
_Box2D.disown_b2ContactFilter(self)
return weakref.proxy(self)
# Register b2ContactFilter in _Box2D:
_Box2D.b2ContactFilter_swigregister(b2ContactFilter)
class b2ContactImpulse(object):
r"""Contact impulses for reporting. Impulses are used instead of forces because sub-step forces may approach infinity for rigid body collisions. These match up one-to-one with the contact points in b2Manifold."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
count = property(_Box2D.b2ContactImpulse_count_get, _Box2D.b2ContactImpulse_count_set, doc=r"""count : int32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactImpulse___hash__)
def __repr__(self):
return _format_repr(self)
__get_normal_impulses = _swig_new_instance_method(_Box2D.b2ContactImpulse___get_normal_impulses)
__get_tangent_impulses = _swig_new_instance_method(_Box2D.b2ContactImpulse___get_tangent_impulses)
normalImpulses = property(__get_normal_impulses, None)
tangentImpulses = property(__get_tangent_impulses, None)
def __init__(self, **kwargs):
_Box2D.b2ContactImpulse_swiginit(self,_Box2D.new_b2ContactImpulse())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ContactImpulse
# Register b2ContactImpulse in _Box2D:
_Box2D.b2ContactImpulse_swigregister(b2ContactImpulse)
class b2ContactListener(object):
r"""
Implement this class to get contact information. You can use these results for things like sounds and game logic. You can also get contact results by traversing the contact lists after the time step. However, you might miss some contacts because continuous physics leads to sub-stepping. Additionally you may receive multiple callbacks for the same contact in a single time step. You should strive to make your callbacks efficient because there may be many callbacks per time step.
WARNING:
You cannot create/destroy Box2D entities inside these callbacks.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__swig_destroy__ = _Box2D.delete_b2ContactListener
BeginContact = _swig_new_instance_method(_Box2D.b2ContactListener_BeginContact)
EndContact = _swig_new_instance_method(_Box2D.b2ContactListener_EndContact)
PreSolve = _swig_new_instance_method(_Box2D.b2ContactListener_PreSolve)
PostSolve = _swig_new_instance_method(_Box2D.b2ContactListener_PostSolve)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactListener___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
if self.__class__ == b2ContactListener:
_self = None
else:
_self = self
_Box2D.b2ContactListener_swiginit(self,_Box2D.new_b2ContactListener(_self, ))
_init_kwargs(self, **kwargs)
def __disown__(self):
self.this.disown()
_Box2D.disown_b2ContactListener(self)
return weakref.proxy(self)
# Register b2ContactListener in _Box2D:
_Box2D.b2ContactListener_swigregister(b2ContactListener)
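# Usage sketch for b2ContactListener (comments only). Subclass and override the
# callbacks you need; the director machinery routes the C++ calls to Python.
# The fixtureA/fixtureB attributes come from b2Contact (defined later in this
# module), and attaching via 'world.contactListener' is assumed:
#
#     class MyListener(b2ContactListener):
#         def BeginContact(self, contact):
#             print('touching:', contact.fixtureA.body, contact.fixtureB.body)
#
#     # world.contactListener = MyListener()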
class b2QueryCallback(object):
r"""Callback class for AABB queries. See b2World::Query"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__swig_destroy__ = _Box2D.delete_b2QueryCallback
ReportFixture = _swig_new_instance_method(_Box2D.b2QueryCallback_ReportFixture)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2QueryCallback___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
if self.__class__ == b2QueryCallback:
_self = None
else:
_self = self
_Box2D.b2QueryCallback_swiginit(self,_Box2D.new_b2QueryCallback(_self, ))
_init_kwargs(self, **kwargs)
def __disown__(self):
self.this.disown()
_Box2D.disown_b2QueryCallback(self)
return weakref.proxy(self)
# Register b2QueryCallback in _Box2D:
_Box2D.b2QueryCallback_swigregister(b2QueryCallback)
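# Usage sketch for b2QueryCallback (comments only). ReportFixture is called once
# per fixture whose AABB overlaps the query region; return True to continue the
# query, False to stop early. The b2World.QueryAABB call is assumed:
#
#     class AllFixturesQuery(b2QueryCallback):
#         def __init__(self):
#             b2QueryCallback.__init__(self)
#             self.fixtures = []
#         def ReportFixture(self, fixture):
#             self.fixtures.append(fixture)
#             return True
#
#     # world.QueryAABB(AllFixturesQuery(), aabb)   # 'aabb' is a b2AABB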
class b2RayCastCallback(object):
r"""Callback class for ray casts. See b2World::RayCast"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
__swig_destroy__ = _Box2D.delete_b2RayCastCallback
ReportFixture = _swig_new_instance_method(_Box2D.b2RayCastCallback_ReportFixture)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RayCastCallback___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self):
r"""
__init__(b2RayCastCallback self) -> b2RayCastCallback
Callback class for ray casts. See b2World::RayCast
"""
if self.__class__ == b2RayCastCallback:
_self = None
else:
_self = self
_Box2D.b2RayCastCallback_swiginit(self, _Box2D.new_b2RayCastCallback(_self, ))
def __disown__(self):
self.this.disown()
_Box2D.disown_b2RayCastCallback(self)
return weakref.proxy(self)
# Register b2RayCastCallback in _Box2D:
_Box2D.b2RayCastCallback_swigregister(b2RayCastCallback)
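# Illustrative usage sketch (not part of the generated bindings): a closest-hit
# ray cast. The ReportFixture signature (fixture, point, normal, fraction) and
# the return-value convention (return the fraction to clip the ray to the hit
# point, 0 to terminate, 1 to continue) follow the Box2D ray-cast callback.
def _example_ray_cast(world, point1, point2):
    class _ClosestHitCallback(b2RayCastCallback):
        def __init__(self):
            super(_ClosestHitCallback, self).__init__()
            self.hit = None
        def ReportFixture(self, fixture, point, normal, fraction):
            self.hit = (fixture, point, normal)
            return fraction  # clip the ray to this hit
    callback = _ClosestHitCallback()
    world.RayCast(callback, point1, point2)
    return callback.hit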
class b2Profile(object):
r"""Proxy of C++ b2Profile class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
step = property(_Box2D.b2Profile_step_get, _Box2D.b2Profile_step_set, doc=r"""step : float32""")
collide = property(_Box2D.b2Profile_collide_get, _Box2D.b2Profile_collide_set, doc=r"""collide : float32""")
solve = property(_Box2D.b2Profile_solve_get, _Box2D.b2Profile_solve_set, doc=r"""solve : float32""")
solveInit = property(_Box2D.b2Profile_solveInit_get, _Box2D.b2Profile_solveInit_set, doc=r"""solveInit : float32""")
solveVelocity = property(_Box2D.b2Profile_solveVelocity_get, _Box2D.b2Profile_solveVelocity_set, doc=r"""solveVelocity : float32""")
solvePosition = property(_Box2D.b2Profile_solvePosition_get, _Box2D.b2Profile_solvePosition_set, doc=r"""solvePosition : float32""")
broadphase = property(_Box2D.b2Profile_broadphase_get, _Box2D.b2Profile_broadphase_set, doc=r"""broadphase : float32""")
solveTOI = property(_Box2D.b2Profile_solveTOI_get, _Box2D.b2Profile_solveTOI_set, doc=r"""solveTOI : float32""")
def __init__(self):
r"""__init__(b2Profile self) -> b2Profile"""
_Box2D.b2Profile_swiginit(self, _Box2D.new_b2Profile())
__swig_destroy__ = _Box2D.delete_b2Profile
# Register b2Profile in _Box2D:
_Box2D.b2Profile_swigregister(b2Profile)
class b2SolverData(object):
r"""Proxy of C++ b2SolverData class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
step = property(_Box2D.b2SolverData_step_get, _Box2D.b2SolverData_step_set, doc=r"""step : b2TimeStep""")
positions = property(_Box2D.b2SolverData_positions_get, _Box2D.b2SolverData_positions_set, doc=r"""positions : p.b2Position""")
velocities = property(_Box2D.b2SolverData_velocities_get, _Box2D.b2SolverData_velocities_set, doc=r"""velocities : p.b2Velocity""")
def __init__(self):
r"""__init__(b2SolverData self) -> b2SolverData"""
_Box2D.b2SolverData_swiginit(self, _Box2D.new_b2SolverData())
__swig_destroy__ = _Box2D.delete_b2SolverData
# Register b2SolverData in _Box2D:
_Box2D.b2SolverData_swigregister(b2SolverData)
class b2ContactManager(object):
r"""Proxy of C++ b2ContactManager class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self):
r"""__init__(b2ContactManager self) -> b2ContactManager"""
_Box2D.b2ContactManager_swiginit(self, _Box2D.new_b2ContactManager())
AddPair = _swig_new_instance_method(_Box2D.b2ContactManager_AddPair)
FindNewContacts = _swig_new_instance_method(_Box2D.b2ContactManager_FindNewContacts)
Destroy = _swig_new_instance_method(_Box2D.b2ContactManager_Destroy)
Collide = _swig_new_instance_method(_Box2D.b2ContactManager_Collide)
broadPhase = property(_Box2D.b2ContactManager_broadPhase_get, _Box2D.b2ContactManager_broadPhase_set, doc=r"""broadPhase : b2BroadPhase""")
contactList = property(_Box2D.b2ContactManager_contactList_get, _Box2D.b2ContactManager_contactList_set, doc=r"""contactList : p.b2Contact""")
contactCount = property(_Box2D.b2ContactManager_contactCount_get, _Box2D.b2ContactManager_contactCount_set, doc=r"""contactCount : int32""")
contactFilter = property(_Box2D.b2ContactManager_contactFilter_get, _Box2D.b2ContactManager_contactFilter_set, doc=r"""contactFilter : p.b2ContactFilter""")
contactListener = property(_Box2D.b2ContactManager_contactListener_get, _Box2D.b2ContactManager_contactListener_set, doc=r"""contactListener : p.b2ContactListener""")
allocator = property(_Box2D.b2ContactManager_allocator_get, _Box2D.b2ContactManager_allocator_set, doc=r"""allocator : p.b2BlockAllocator""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactManager___hash__)
def __repr__(self):
return _format_repr(self)
__swig_destroy__ = _Box2D.delete_b2ContactManager
# Register b2ContactManager in _Box2D:
_Box2D.b2ContactManager_swigregister(b2ContactManager)
b2_stackSize = b2Globals.b2_stackSize
b2_maxStackEntries = b2Globals.b2_maxStackEntries
class b2World(object):
r"""The world class manages all physics entities, dynamic simulation, and asynchronous queries. The world also contains efficient memory management facilities."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, gravity=(0, -10), doSleep=True, **kwargs):
"""__init__(self, gravity=(0, -10), doSleep=True, **kwargs) -> b2World
Additional kwargs like contactListener will be passed after the world is created.
Examples:
b2World(gravity=(0,-10), doSleep=True)
b2World(contactListener=myListener)
"""
self.__data = {}
_Box2D.b2World_swiginit(self,_Box2D.new_b2World(gravity))
self.allowSleeping = doSleep
for key, value in kwargs.items():
try:
setattr(self, key, value)
except Exception as ex:
raise ex.__class__('Failed on kwargs, class="%s" key="%s": %s' \
% (self.__class__.__name__, key, ex))
__swig_destroy__ = _Box2D.delete_b2World
__SetDestructionListener_internal = _swig_new_instance_method(_Box2D.b2World___SetDestructionListener_internal)
__SetContactFilter_internal = _swig_new_instance_method(_Box2D.b2World___SetContactFilter_internal)
__SetContactListener_internal = _swig_new_instance_method(_Box2D.b2World___SetContactListener_internal)
__SetDebugDraw_internal = _swig_new_instance_method(_Box2D.b2World___SetDebugDraw_internal)
Step = _swig_new_instance_method(_Box2D.b2World_Step)
ClearForces = _swig_new_instance_method(_Box2D.b2World_ClearForces)
DrawDebugData = _swig_new_instance_method(_Box2D.b2World_DrawDebugData)
QueryAABB = _swig_new_instance_method(_Box2D.b2World_QueryAABB)
RayCast = _swig_new_instance_method(_Box2D.b2World_RayCast)
__GetBodyList_internal = _swig_new_instance_method(_Box2D.b2World___GetBodyList_internal)
__GetJointList_internal = _swig_new_instance_method(_Box2D.b2World___GetJointList_internal)
__GetContactList_internal = _swig_new_instance_method(_Box2D.b2World___GetContactList_internal)
SetAllowSleeping = _swig_new_instance_method(_Box2D.b2World_SetAllowSleeping)
GetAllowSleeping = _swig_new_instance_method(_Box2D.b2World_GetAllowSleeping)
__SetWarmStarting = _swig_new_instance_method(_Box2D.b2World___SetWarmStarting)
__GetWarmStarting = _swig_new_instance_method(_Box2D.b2World___GetWarmStarting)
__SetContinuousPhysics = _swig_new_instance_method(_Box2D.b2World___SetContinuousPhysics)
__GetContinuousPhysics = _swig_new_instance_method(_Box2D.b2World___GetContinuousPhysics)
__SetSubStepping = _swig_new_instance_method(_Box2D.b2World___SetSubStepping)
__GetSubStepping = _swig_new_instance_method(_Box2D.b2World___GetSubStepping)
__GetProxyCount = _swig_new_instance_method(_Box2D.b2World___GetProxyCount)
__GetBodyCount = _swig_new_instance_method(_Box2D.b2World___GetBodyCount)
__GetJointCount = _swig_new_instance_method(_Box2D.b2World___GetJointCount)
__GetContactCount = _swig_new_instance_method(_Box2D.b2World___GetContactCount)
GetTreeHeight = _swig_new_instance_method(_Box2D.b2World_GetTreeHeight)
GetTreeBalance = _swig_new_instance_method(_Box2D.b2World_GetTreeBalance)
GetTreeQuality = _swig_new_instance_method(_Box2D.b2World_GetTreeQuality)
__SetGravity = _swig_new_instance_method(_Box2D.b2World___SetGravity)
__GetGravity = _swig_new_instance_method(_Box2D.b2World___GetGravity)
__IsLocked = _swig_new_instance_method(_Box2D.b2World___IsLocked)
__SetAutoClearForces = _swig_new_instance_method(_Box2D.b2World___SetAutoClearForces)
__GetAutoClearForces = _swig_new_instance_method(_Box2D.b2World___GetAutoClearForces)
ShiftOrigin = _swig_new_instance_method(_Box2D.b2World_ShiftOrigin)
__GetContactManager = _swig_new_instance_method(_Box2D.b2World___GetContactManager)
GetProfile = _swig_new_instance_method(_Box2D.b2World_GetProfile)
Dump = _swig_new_instance_method(_Box2D.b2World_Dump)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2World___hash__)
def __repr__(self):
return _format_repr(self)
__CreateBody = _swig_new_instance_method(_Box2D.b2World___CreateBody)
__CreateJoint = _swig_new_instance_method(_Box2D.b2World___CreateJoint)
DestroyBody = _swig_new_instance_method(_Box2D.b2World_DestroyBody)
DestroyJoint = _swig_new_instance_method(_Box2D.b2World_DestroyJoint)
def __iter__(self):
"""
Iterates over the bodies in the world
"""
for body in self.bodies:
yield body
def CreateDynamicBody(self, **kwargs):
"""
Create a single dynamic body in the world.
Accepts only kwargs to a b2BodyDef. For more information, see
CreateBody and b2BodyDef.
"""
kwargs['type'] = b2_dynamicBody
return self.CreateBody(**kwargs)
def CreateKinematicBody(self, **kwargs):
"""
Create a single kinematic body in the world.
Accepts only kwargs to a b2BodyDef. For more information, see
CreateBody and b2BodyDef.
"""
kwargs['type'] = b2_kinematicBody
return self.CreateBody(**kwargs)
def CreateStaticBody(self, **kwargs):
"""
Create a single static body in the world.
Accepts only kwargs to a b2BodyDef. For more information, see
CreateBody and b2BodyDef.
"""
kwargs['type'] = b2_staticBody
return self.CreateBody(**kwargs)
def CreateBody(self, defn=None, **kwargs):
"""
Create a body in the world.
Takes a single b2BodyDef argument, or kwargs to pass to a temporary b2BodyDef.
world.CreateBody(position=(1,2), angle=1)
is short for:
world.CreateBody(b2BodyDef(position=(1,2), angle=1))
If the definition (or kwargs) sets 'fixtures', they will be created on the
newly created body. A single fixture is also accepted.
CreateBody(..., fixtures=[])
This is short for:
body = CreateBody(...)
for fixture in []:
body.CreateFixture(fixture)
'shapes' and 'shapeFixture' are also accepted:
CreateBody(..., shapes=[], shapeFixture=b2FixtureDef())
This is short for:
body = CreateBody(...)
body.CreateFixturesFromShapes(shapes=[], shapeFixture=b2FixtureDef())
"""
if defn is not None:
if not isinstance(defn, b2BodyDef):
raise TypeError('Expected b2BodyDef')
else:
defn = b2BodyDef(**kwargs)
body=self.__CreateBody(defn)
if defn.fixtures:
if isinstance(defn.fixtures, (list, tuple)):
for fixture in defn.fixtures:
body.CreateFixture(fixture)
else:
body.CreateFixture(defn.fixtures)
if defn.shapes:
body.CreateFixturesFromShapes(shapes=defn.shapes, shapeFixture=defn.shapeFixture)
if 'massData' in kwargs:
body.massData=kwargs['massData']
if 'localCenter' in kwargs:
body.localCenter=kwargs['localCenter']
if 'inertia' in kwargs:
body.inertia=kwargs['inertia']
if 'mass' in kwargs:
body.mass=kwargs['mass']
return body
def CreateDistanceJoint(self, **kwargs):
"""
Create a single b2DistanceJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2DistanceJointDef(**kwargs))
def CreateRopeJoint(self, **kwargs):
"""
Create a single b2RopeJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2RopeJointDef(**kwargs))
def CreateFrictionJoint(self, **kwargs):
"""
Create a single b2FrictionJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2FrictionJointDef(**kwargs))
def CreateGearJoint(self, **kwargs):
"""
Create a single b2GearJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either joint1 or joint2 is left unset.
"""
if 'joint1' not in kwargs or 'joint2' not in kwargs:
raise ValueError('Gear joint requires that both joint1 and joint2 be set')
return self.__CreateJoint(b2GearJointDef(**kwargs))
def CreateWheelJoint(self, **kwargs):
"""
Create a single b2WheelJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2WheelJointDef(**kwargs))
def CreateMouseJoint(self, **kwargs):
"""
Create a single b2MouseJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2MouseJointDef(**kwargs))
def CreatePrismaticJoint(self, **kwargs):
"""
Create a single b2PrismaticJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2PrismaticJointDef(**kwargs))
def CreatePulleyJoint(self, **kwargs):
"""
Create a single b2PulleyJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2PulleyJointDef(**kwargs))
def CreateRevoluteJoint(self, **kwargs):
"""
Create a single b2RevoluteJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2RevoluteJointDef(**kwargs))
def CreateWeldJoint(self, **kwargs):
"""
Create a single b2WeldJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2WeldJointDef(**kwargs))
def CreateMotorJoint(self, **kwargs):
"""
Create a single b2MotorJoint. Only accepts kwargs to the joint definition.
Raises ValueError if either bodyA or bodyB is left unset.
"""
if 'bodyA' not in kwargs or 'bodyB' not in kwargs:
raise ValueError('Requires at least bodyA and bodyB be set')
return self.__CreateJoint(b2MotorJointDef(**kwargs))
def CreateJoint(self, defn=None, type=None, **kwargs):
"""
Create a joint in the world.
Takes a single b2JointDef argument, or kwargs to pass to a temporary b2JointDef.
All of these are exactly equivalent:
world.CreateJoint(type=b2RevoluteJoint, bodyA=body, bodyB=body2)
world.CreateJoint(type=b2RevoluteJointDef, bodyA=body, bodyB=body2)
world.CreateJoint(b2RevoluteJointDef(bodyA=body, bodyB=body2))
"""
if defn is not None:
if not isinstance(defn, b2JointDef):
raise TypeError('Expected b2JointDef')
if defn.bodyA is None or defn.bodyB is None:
raise ValueError('bodyA and bodyB must be set')
else:
if type is not None:
if issubclass(type, b2JointDef):
class_type = type
elif issubclass(type, b2Joint): # a b2Joint passed in, so get the b2JointDef
class_type = globals()[type.__name__ + 'Def']
else:
raise TypeError('Expected "type" to be a b2Joint or b2JointDef')
else:
raise TypeError('Expected "type" to be a b2Joint or b2JointDef')
defn = class_type(**kwargs)
if isinstance(defn, b2GearJointDef):
if not defn.joint1 or not defn.joint2:
raise ValueError('Gear joint requires that both joint1 and joint2 be set')
else:
if not defn.bodyA or not defn.bodyB:
raise ValueError('Body or bodies not set (bodyA, bodyB)')
return self.__CreateJoint(defn)
    # The logic behind these functions is that they increase the refcount
    # of the listeners as you set them, so it is no longer necessary to keep
    # a copy of them yourself. Upon destruction of the object, the stored entry
    # is cleared, which also releases the reference to the function.
    # The same dictionary is also used to buffer previously write-only values
    # in the shadowed class so that they become read-write.
def __GetData(self, name):
return self.__data.get(name)
def __SetData(self, name, value, fcn):
self.__data[name] = value
fcn(value)
# Read-write properties
gravity = property(__GetGravity, __SetGravity)
autoClearForces = property(__GetAutoClearForces, __SetAutoClearForces)
destructionListener = property(lambda self: self.__GetData('destruction'),
lambda self, fcn: self.__SetData('destruction', fcn, self.__SetDestructionListener_internal))
contactListener= property(lambda self: self.__GetData('contact'),
lambda self, fcn: self.__SetData('contact', fcn, self.__SetContactListener_internal))
contactFilter= property(lambda self: self.__GetData('contactfilter'),
lambda self, fcn: self.__SetData('contactfilter', fcn, self.__SetContactFilter_internal))
renderer= property(lambda self: self.__GetData('renderer'),
lambda self, fcn: self.__SetData('renderer', fcn, self.__SetDebugDraw_internal))
continuousPhysics = property(__GetContinuousPhysics, __SetContinuousPhysics)
warmStarting = property(__GetWarmStarting, __SetWarmStarting)
subStepping = property(__GetSubStepping, __SetSubStepping)
# Read-only
contactManager= property(__GetContactManager, None)
contactCount = property(__GetContactCount, None)
bodyCount = property(__GetBodyCount, None)
jointCount = property(__GetJointCount, None)
proxyCount = property(__GetProxyCount, None)
joints = property(lambda self: _list_from_linked_list(self.__GetJointList_internal()), None,
doc="""All joints in the world. NOTE: This re-creates the list on every call. See also joints_gen.""")
bodies = property(lambda self: _list_from_linked_list(self.__GetBodyList_internal()), None,
doc="""All bodies in the world. NOTE: This re-creates the list on every call. See also bodies_gen.""")
contacts= property(lambda self: _list_from_linked_list(self.__GetContactList_internal()), None,
doc="""All contacts in the world. NOTE: This re-creates the list on every call. See also contacts_gen.""")
joints_gen = property(lambda self: _indexable_generator(_generator_from_linked_list(self.__GetJointList_internal())), None,
doc="""Indexable generator of the connected joints to this body.
NOTE: When not using the whole list, this may be preferable to using 'joints'.""")
bodies_gen = property(lambda self: _indexable_generator(_generator_from_linked_list(self.__GetBodyList_internal())), None,
doc="""Indexable generator of all bodies.
NOTE: When not using the whole list, this may be preferable to using 'bodies'.""")
contacts_gen = property(lambda self: _indexable_generator(_generator_from_linked_list(self.__GetContactList_internal())), None,
doc="""Indexable generator of all contacts.
NOTE: When not using the whole list, this may be preferable to using 'contacts'.""")
locked = property(__IsLocked, None)
# Register b2World in _Box2D:
_Box2D.b2World_swigregister(b2World)
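# Illustrative usage sketch (not part of the generated bindings): building a
# tiny simulation with the convenience methods above. b2PolygonShape,
# b2CircleShape and b2FixtureDef are defined elsewhere in these bindings; the
# numeric values are arbitrary.
def _example_world():
    world = b2World(gravity=(0, -10), doSleep=True)
    ground = world.CreateStaticBody(
        position=(0, 0),
        shapes=b2PolygonShape(box=(50, 1)),
    )
    ball = world.CreateDynamicBody(
        position=(0, 10),
        fixtures=b2FixtureDef(shape=b2CircleShape(radius=0.5), density=1.0),
    )
    for _ in range(60):
        # Step(timeStep, velocityIterations, positionIterations)
        world.Step(1.0 / 60, 8, 3)
    return world, ground, ball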
b2MixFriction = _Box2D.b2MixFriction
b2MixRestitution = _Box2D.b2MixRestitution
class b2ContactEdge(object):
r"""A contact edge is used to connect bodies and contacts together in a contact graph where each body is a node and each contact is an edge. A contact edge belongs to a doubly linked list maintained in each attached body. Each contact has two contact nodes, one for each attached body."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
other = property(_Box2D.b2ContactEdge_other_get, _Box2D.b2ContactEdge_other_set, doc=r"""other : p.b2Body""")
contact = property(_Box2D.b2ContactEdge_contact_get, _Box2D.b2ContactEdge_contact_set, doc=r"""contact : p.b2Contact""")
prev = property(_Box2D.b2ContactEdge_prev_get, _Box2D.b2ContactEdge_prev_set, doc=r"""prev : p.b2ContactEdge""")
next = property(_Box2D.b2ContactEdge_next_get, _Box2D.b2ContactEdge_next_set, doc=r"""next : p.b2ContactEdge""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2ContactEdge___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2ContactEdge_swiginit(self,_Box2D.new_b2ContactEdge())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2ContactEdge
# Register b2ContactEdge in _Box2D:
_Box2D.b2ContactEdge_swigregister(b2ContactEdge)
class b2Contact(object):
r"""The class manages contact between two shapes. A contact exists for each overlapping AABB in the broad-phase (except if filtered). Therefore a contact object may exist that has no contact points."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__GetManifold = _swig_new_instance_method(_Box2D.b2Contact___GetManifold)
__GetWorldManifold_internal = _swig_new_instance_method(_Box2D.b2Contact___GetWorldManifold_internal)
__IsTouching = _swig_new_instance_method(_Box2D.b2Contact___IsTouching)
__SetEnabled = _swig_new_instance_method(_Box2D.b2Contact___SetEnabled)
__IsEnabled = _swig_new_instance_method(_Box2D.b2Contact___IsEnabled)
__GetNext = _swig_new_instance_method(_Box2D.b2Contact___GetNext)
__GetFixtureA = _swig_new_instance_method(_Box2D.b2Contact___GetFixtureA)
__GetChildIndexA = _swig_new_instance_method(_Box2D.b2Contact___GetChildIndexA)
__GetFixtureB = _swig_new_instance_method(_Box2D.b2Contact___GetFixtureB)
__GetChildIndexB = _swig_new_instance_method(_Box2D.b2Contact___GetChildIndexB)
__SetFriction = _swig_new_instance_method(_Box2D.b2Contact___SetFriction)
__GetFriction = _swig_new_instance_method(_Box2D.b2Contact___GetFriction)
ResetFriction = _swig_new_instance_method(_Box2D.b2Contact_ResetFriction)
__SetRestitution = _swig_new_instance_method(_Box2D.b2Contact___SetRestitution)
__GetRestitution = _swig_new_instance_method(_Box2D.b2Contact___GetRestitution)
ResetRestitution = _swig_new_instance_method(_Box2D.b2Contact_ResetRestitution)
__SetTangentSpeed = _swig_new_instance_method(_Box2D.b2Contact___SetTangentSpeed)
__GetTangentSpeed = _swig_new_instance_method(_Box2D.b2Contact___GetTangentSpeed)
Evaluate = _swig_new_instance_method(_Box2D.b2Contact_Evaluate)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Contact___hash__)
def __repr__(self):
return _format_repr(self)
def __GetWorldManifold(self):
ret=b2WorldManifold()
self.__GetWorldManifold_internal(ret)
return ret
# Read-write properties
enabled = property(__IsEnabled, __SetEnabled)
# Read-only
next = property(__GetNext, None)
fixtureB = property(__GetFixtureB, None)
fixtureA = property(__GetFixtureA, None)
manifold = property(__GetManifold, None)
childIndexA = property(__GetChildIndexA, None)
childIndexB = property(__GetChildIndexB, None)
worldManifold = property(__GetWorldManifold, None)
touching = property(__IsTouching, None)
friction = property(__GetFriction, __SetFriction)
restitution = property(__GetRestitution, __SetRestitution)
tangentSpeed = property(__GetTangentSpeed, __SetTangentSpeed)
# Register b2Contact in _Box2D:
_Box2D.b2Contact_swigregister(b2Contact)
e_wheelJoint = _Box2D.e_wheelJoint
e_ropeJoint = _Box2D.e_ropeJoint
class b2Jacobian(object):
r"""Proxy of C++ b2Jacobian class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
linear = property(_Box2D.b2Jacobian_linear_get, _Box2D.b2Jacobian_linear_set, doc=r"""linear : b2Vec2""")
angularA = property(_Box2D.b2Jacobian_angularA_get, _Box2D.b2Jacobian_angularA_set, doc=r"""angularA : float32""")
angularB = property(_Box2D.b2Jacobian_angularB_get, _Box2D.b2Jacobian_angularB_set, doc=r"""angularB : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Jacobian___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2Jacobian_swiginit(self,_Box2D.new_b2Jacobian())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2Jacobian
# Register b2Jacobian in _Box2D:
_Box2D.b2Jacobian_swigregister(b2Jacobian)
class b2JointEdge(object):
r"""A joint edge is used to connect bodies and joints together in a joint graph where each body is a node and each joint is an edge. A joint edge belongs to a doubly linked list maintained in each attached body. Each joint has two joint nodes, one for each attached body."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
other = property(_Box2D.b2JointEdge_other_get, _Box2D.b2JointEdge_other_set, doc=r"""other : p.b2Body""")
joint = property(_Box2D.b2JointEdge_joint_get, _Box2D.b2JointEdge_joint_set, doc=r"""joint : p.b2Joint""")
prev = property(_Box2D.b2JointEdge_prev_get, _Box2D.b2JointEdge_prev_set, doc=r"""prev : p.b2JointEdge""")
next = property(_Box2D.b2JointEdge_next_get, _Box2D.b2JointEdge_next_set, doc=r"""next : p.b2JointEdge""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2JointEdge___hash__)
def __repr__(self):
return _format_repr(self)
def __init__(self, **kwargs):
_Box2D.b2JointEdge_swiginit(self,_Box2D.new_b2JointEdge())
_init_kwargs(self, **kwargs)
__swig_destroy__ = _Box2D.delete_b2JointEdge
# Register b2JointEdge in _Box2D:
_Box2D.b2JointEdge_swigregister(b2JointEdge)
class b2JointDef(object):
r"""Joint definitions are used to construct joints."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2JointDef_swiginit(self,_Box2D.new_b2JointDef())
_init_kwargs(self, **kwargs)
type = property(_Box2D.b2JointDef_type_get, _Box2D.b2JointDef_type_set, doc=r"""type : b2JointType""")
bodyA = property(_Box2D.b2JointDef_bodyA_get, _Box2D.b2JointDef_bodyA_set, doc=r"""bodyA : p.b2Body""")
bodyB = property(_Box2D.b2JointDef_bodyB_get, _Box2D.b2JointDef_bodyB_set, doc=r"""bodyB : p.b2Body""")
collideConnected = property(_Box2D.b2JointDef_collideConnected_get, _Box2D.b2JointDef_collideConnected_set, doc=r"""collideConnected : bool""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2JointDef___hash__)
def __repr__(self):
return _format_repr(self)
__GetUserData = _swig_new_instance_method(_Box2D.b2JointDef___GetUserData)
__SetUserData = _swig_new_instance_method(_Box2D.b2JointDef___SetUserData)
ClearUserData = _swig_new_instance_method(_Box2D.b2JointDef_ClearUserData)
userData = property(__GetUserData, __SetUserData)
def __del__(self):
self.ClearUserData()
def to_kwargs(self):
"""
Returns a dictionary representing this joint definition
"""
def is_prop(attr):
try:
is_property = isinstance(getattr(cls, attr), property)
except AttributeError:
return False
return is_property and attr not in skip_props
skip_props = ['anchor', 'anchorA', 'anchorB', 'axis']
cls = type(self)
return {attr: getattr(self, attr)
for attr in dir(self)
if is_prop(attr)
}
__swig_destroy__ = _Box2D.delete_b2JointDef
# Register b2JointDef in _Box2D:
_Box2D.b2JointDef_swigregister(b2JointDef)
class b2Joint(object):
r"""The base joint class. Joints are used to constraint two bodies together in various fashions. Some joints also feature limits and motors."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__GetType = _swig_new_instance_method(_Box2D.b2Joint___GetType)
__GetBodyA = _swig_new_instance_method(_Box2D.b2Joint___GetBodyA)
__GetBodyB = _swig_new_instance_method(_Box2D.b2Joint___GetBodyB)
__GetAnchorA = _swig_new_instance_method(_Box2D.b2Joint___GetAnchorA)
__GetAnchorB = _swig_new_instance_method(_Box2D.b2Joint___GetAnchorB)
GetReactionForce = _swig_new_instance_method(_Box2D.b2Joint_GetReactionForce)
GetReactionTorque = _swig_new_instance_method(_Box2D.b2Joint_GetReactionTorque)
__GetNext = _swig_new_instance_method(_Box2D.b2Joint___GetNext)
__IsActive = _swig_new_instance_method(_Box2D.b2Joint___IsActive)
__GetCollideConnected = _swig_new_instance_method(_Box2D.b2Joint___GetCollideConnected)
Dump = _swig_new_instance_method(_Box2D.b2Joint_Dump)
ShiftOrigin = _swig_new_instance_method(_Box2D.b2Joint_ShiftOrigin)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2Joint___hash__)
def __repr__(self):
return _format_repr(self)
__GetUserData = _swig_new_instance_method(_Box2D.b2Joint___GetUserData)
__SetUserData = _swig_new_instance_method(_Box2D.b2Joint___SetUserData)
ClearUserData = _swig_new_instance_method(_Box2D.b2Joint_ClearUserData)
userData = property(__GetUserData, __SetUserData)
__eq__ = b2JointCompare
__ne__ = lambda self,other: not b2JointCompare(self,other)
# Read-only
next = property(__GetNext, None)
bodyA = property(__GetBodyA, None)
bodyB = property(__GetBodyB, None)
type = property(__GetType, None)
active = property(__IsActive, None)
anchorB = property(__GetAnchorB, None)
anchorA = property(__GetAnchorA, None)
collideConnected = property(__GetCollideConnected, None)
def getAsType(self):
"""
Backward compatibility
"""
return self
# Register b2Joint in _Box2D:
_Box2D.b2Joint_swigregister(b2Joint)
class b2DistanceJointDef(b2JointDef):
r"""
Distance joint definition. This requires defining an anchor point on both bodies and the non-zero length of the distance joint. The definition uses local anchor points so that the initial configuration can violate the constraint slightly. This helps when saving and loading a game.
WARNING:
Do not use a zero or short length.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2DistanceJointDef_swiginit(self,_Box2D.new_b2DistanceJointDef())
_init_jointdef_kwargs(self, **kwargs)
if 'localAnchorA' in kwargs and 'localAnchorB' in kwargs and 'length' not in kwargs:
self.__update_length()
Initialize = _swig_new_instance_method(_Box2D.b2DistanceJointDef_Initialize)
localAnchorA = property(_Box2D.b2DistanceJointDef_localAnchorA_get, _Box2D.b2DistanceJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2DistanceJointDef_localAnchorB_get, _Box2D.b2DistanceJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
length = property(_Box2D.b2DistanceJointDef_length_get, _Box2D.b2DistanceJointDef_length_set, doc=r"""length : float32""")
frequencyHz = property(_Box2D.b2DistanceJointDef_frequencyHz_get, _Box2D.b2DistanceJointDef_frequencyHz_set, doc=r"""frequencyHz : float32""")
dampingRatio = property(_Box2D.b2DistanceJointDef_dampingRatio_get, _Box2D.b2DistanceJointDef_dampingRatio_set, doc=r"""dampingRatio : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DistanceJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __update_length(self):
if self.bodyA and self.bodyB:
d = self.anchorB - self.anchorA
self.length = d.length
def __set_anchorA(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.__update_length()
def __set_anchorB(self, value):
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorB=self.bodyB.GetLocalPoint(value)
self.__update_length()
def __get_anchorA(self):
if not self.bodyA:
raise ValueError('bodyA not set.')
return self.bodyA.GetWorldPoint(self.localAnchorA)
def __get_anchorB(self):
if not self.bodyB:
raise ValueError('bodyB not set.')
return self.bodyB.GetWorldPoint(self.localAnchorB)
anchorA = property(__get_anchorA, __set_anchorA,
doc="""Body A's anchor in world coordinates.
Getting the property depends on both bodyA and localAnchorA.
Setting the property requires that bodyA be set.""")
anchorB = property(__get_anchorB, __set_anchorB,
doc="""Body B's anchor in world coordinates.
Getting the property depends on both bodyB and localAnchorB.
Setting the property requires that bodyB be set.""")
__swig_destroy__ = _Box2D.delete_b2DistanceJointDef
# Register b2DistanceJointDef in _Box2D:
_Box2D.b2DistanceJointDef_swigregister(b2DistanceJointDef)
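# Illustrative usage sketch (not part of the generated bindings): creating a
# distance joint between two existing bodies using the world-space anchorA and
# anchorB properties above; the length is derived from the anchors when it is
# not given explicitly. worldCenter is a b2Body property from these bindings.
def _example_distance_joint(world, body_a, body_b):
    return world.CreateDistanceJoint(
        bodyA=body_a,
        bodyB=body_b,
        anchorA=body_a.worldCenter,
        anchorB=body_b.worldCenter,
        frequencyHz=4.0,
        dampingRatio=0.5,
    )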
class b2DistanceJoint(b2Joint):
r"""A distance joint constrains two points on two bodies to remain at a fixed distance from each other. You can view this as a massless, rigid rod."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2DistanceJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2DistanceJoint_GetLocalAnchorB)
__SetLength = _swig_new_instance_method(_Box2D.b2DistanceJoint___SetLength)
__GetLength = _swig_new_instance_method(_Box2D.b2DistanceJoint___GetLength)
__SetFrequency = _swig_new_instance_method(_Box2D.b2DistanceJoint___SetFrequency)
__GetFrequency = _swig_new_instance_method(_Box2D.b2DistanceJoint___GetFrequency)
__SetDampingRatio = _swig_new_instance_method(_Box2D.b2DistanceJoint___SetDampingRatio)
__GetDampingRatio = _swig_new_instance_method(_Box2D.b2DistanceJoint___GetDampingRatio)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2DistanceJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
length = property(__GetLength, __SetLength)
frequency = property(__GetFrequency, __SetFrequency)
dampingRatio = property(__GetDampingRatio, __SetDampingRatio)
__swig_destroy__ = _Box2D.delete_b2DistanceJoint
# Register b2DistanceJoint in _Box2D:
_Box2D.b2DistanceJoint_swigregister(b2DistanceJoint)
class b2FrictionJointDef(b2JointDef):
r"""Friction joint definition."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2FrictionJointDef_swiginit(self,_Box2D.new_b2FrictionJointDef())
_init_jointdef_kwargs(self, **kwargs)
Initialize = _swig_new_instance_method(_Box2D.b2FrictionJointDef_Initialize)
localAnchorA = property(_Box2D.b2FrictionJointDef_localAnchorA_get, _Box2D.b2FrictionJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2FrictionJointDef_localAnchorB_get, _Box2D.b2FrictionJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
maxForce = property(_Box2D.b2FrictionJointDef_maxForce_get, _Box2D.b2FrictionJointDef_maxForce_set, doc=r"""maxForce : float32""")
maxTorque = property(_Box2D.b2FrictionJointDef_maxTorque_get, _Box2D.b2FrictionJointDef_maxTorque_set, doc=r"""maxTorque : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2FrictionJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __set_anchor(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.localAnchorB=self.bodyB.GetLocalPoint(value)
def __get_anchor(self):
if self.bodyA:
return self.bodyA.GetWorldPoint(self.localAnchorA)
if self.bodyB:
return self.bodyB.GetWorldPoint(self.localAnchorB)
raise ValueError('Neither body was set; unable to get world point.')
anchor = property(__get_anchor, __set_anchor,
doc="""The anchor in world coordinates.
Getting the property depends on either bodyA and localAnchorA or
bodyB and localAnchorB.
Setting the property requires that both bodies be set.""")
__swig_destroy__ = _Box2D.delete_b2FrictionJointDef
# Register b2FrictionJointDef in _Box2D:
_Box2D.b2FrictionJointDef_swigregister(b2FrictionJointDef)
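# Illustrative usage sketch (not part of the generated bindings): top-down
# friction between a static "ground" body and a moving body, anchored at the
# moving body's center. The maxForce/maxTorque values here are arbitrary.
def _example_friction_joint(world, ground, body):
    return world.CreateFrictionJoint(
        bodyA=ground,
        bodyB=body,
        anchor=body.worldCenter,
        maxForce=10.0,
        maxTorque=10.0,
    )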
class b2FrictionJoint(b2Joint):
r"""Friction joint. This is used for top-down friction. It provides 2D translational friction and angular friction."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2FrictionJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2FrictionJoint_GetLocalAnchorB)
__SetMaxForce = _swig_new_instance_method(_Box2D.b2FrictionJoint___SetMaxForce)
__GetMaxForce = _swig_new_instance_method(_Box2D.b2FrictionJoint___GetMaxForce)
__SetMaxTorque = _swig_new_instance_method(_Box2D.b2FrictionJoint___SetMaxTorque)
__GetMaxTorque = _swig_new_instance_method(_Box2D.b2FrictionJoint___GetMaxTorque)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2FrictionJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
maxForce = property(__GetMaxForce, __SetMaxForce)
maxTorque = property(__GetMaxTorque, __SetMaxTorque)
__swig_destroy__ = _Box2D.delete_b2FrictionJoint
# Register b2FrictionJoint in _Box2D:
_Box2D.b2FrictionJoint_swigregister(b2FrictionJoint)
class b2GearJointDef(b2JointDef):
r"""Gear joint definition. This definition requires two existing revolute or prismatic joints (any combination will work). The provided joints must attach a dynamic body to a static body."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2GearJointDef_swiginit(self,_Box2D.new_b2GearJointDef())
_init_kwargs(self, **kwargs)
joint1 = property(_Box2D.b2GearJointDef_joint1_get, _Box2D.b2GearJointDef_joint1_set, doc=r"""joint1 : p.b2Joint""")
joint2 = property(_Box2D.b2GearJointDef_joint2_get, _Box2D.b2GearJointDef_joint2_set, doc=r"""joint2 : p.b2Joint""")
ratio = property(_Box2D.b2GearJointDef_ratio_get, _Box2D.b2GearJointDef_ratio_set, doc=r"""ratio : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2GearJointDef___hash__)
def __repr__(self):
return _format_repr(self)
__swig_destroy__ = _Box2D.delete_b2GearJointDef
# Register b2GearJointDef in _Box2D:
_Box2D.b2GearJointDef_swigregister(b2GearJointDef)
class b2GearJoint(b2Joint):
r"""
    A gear joint is used to connect two joints together. Either joint can be a revolute or prismatic joint. You specify a gear ratio to bind the motions together: coordinate1 + ratio * coordinate2 = constant. The ratio can be negative or positive. If one joint is a revolute joint and the other joint is a prismatic joint, then the ratio will have units of length or units of 1/length.
WARNING:
The revolute and prismatic joints must be attached to fixed bodies (which must be body1 on those joints).
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetJoint1 = _swig_new_instance_method(_Box2D.b2GearJoint_GetJoint1)
GetJoint2 = _swig_new_instance_method(_Box2D.b2GearJoint_GetJoint2)
__SetRatio = _swig_new_instance_method(_Box2D.b2GearJoint___SetRatio)
__GetRatio = _swig_new_instance_method(_Box2D.b2GearJoint___GetRatio)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2GearJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
ratio = property(__GetRatio, __SetRatio)
__swig_destroy__ = _Box2D.delete_b2GearJoint
# Register b2GearJoint in _Box2D:
_Box2D.b2GearJoint_swigregister(b2GearJoint)
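# Illustrative usage sketch (not part of the generated bindings): gearing an
# existing revolute joint to an existing prismatic joint. Both joints must
# already be attached as described in the warning above; ratio is arbitrary.
def _example_gear_joint(world, revolute_joint, prismatic_joint):
    return world.CreateGearJoint(
        joint1=revolute_joint,
        joint2=prismatic_joint,
        ratio=2.0,
    )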
class b2MotorJointDef(b2JointDef):
r"""Proxy of C++ b2MotorJointDef class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, bodyA=None, bodyB=None, **kwargs):
_Box2D.b2MotorJointDef_swiginit(self,_Box2D.new_b2MotorJointDef())
_init_jointdef_kwargs(self, bodyA=bodyA, bodyB=bodyB, **kwargs)
if bodyA is not None and bodyB is not None:
if not kwargs:
self.Initialize(bodyA, bodyB)
Initialize = _swig_new_instance_method(_Box2D.b2MotorJointDef_Initialize)
linearOffset = property(_Box2D.b2MotorJointDef_linearOffset_get, _Box2D.b2MotorJointDef_linearOffset_set, doc=r"""linearOffset : b2Vec2""")
angularOffset = property(_Box2D.b2MotorJointDef_angularOffset_get, _Box2D.b2MotorJointDef_angularOffset_set, doc=r"""angularOffset : float32""")
maxForce = property(_Box2D.b2MotorJointDef_maxForce_get, _Box2D.b2MotorJointDef_maxForce_set, doc=r"""maxForce : float32""")
maxTorque = property(_Box2D.b2MotorJointDef_maxTorque_get, _Box2D.b2MotorJointDef_maxTorque_set, doc=r"""maxTorque : float32""")
correctionFactor = property(_Box2D.b2MotorJointDef_correctionFactor_get, _Box2D.b2MotorJointDef_correctionFactor_set, doc=r"""correctionFactor : float32""")
__swig_destroy__ = _Box2D.delete_b2MotorJointDef
# Register b2MotorJointDef in _Box2D:
_Box2D.b2MotorJointDef_swigregister(b2MotorJointDef)
class b2MotorJoint(b2Joint):
r"""Proxy of C++ b2MotorJoint class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__SetLinearOffset = _swig_new_instance_method(_Box2D.b2MotorJoint___SetLinearOffset)
__GetLinearOffset = _swig_new_instance_method(_Box2D.b2MotorJoint___GetLinearOffset)
__SetAngularOffset = _swig_new_instance_method(_Box2D.b2MotorJoint___SetAngularOffset)
__GetAngularOffset = _swig_new_instance_method(_Box2D.b2MotorJoint___GetAngularOffset)
__SetMaxForce = _swig_new_instance_method(_Box2D.b2MotorJoint___SetMaxForce)
__GetMaxForce = _swig_new_instance_method(_Box2D.b2MotorJoint___GetMaxForce)
__SetMaxTorque = _swig_new_instance_method(_Box2D.b2MotorJoint___SetMaxTorque)
__GetMaxTorque = _swig_new_instance_method(_Box2D.b2MotorJoint___GetMaxTorque)
# Read-write properties
maxForce = property(__GetMaxForce, __SetMaxForce)
maxTorque = property(__GetMaxTorque, __SetMaxTorque)
linearOffset = property(__GetLinearOffset, __SetLinearOffset)
angularOffset = property(__GetAngularOffset, __SetAngularOffset)
__swig_destroy__ = _Box2D.delete_b2MotorJoint
# Register b2MotorJoint in _Box2D:
_Box2D.b2MotorJoint_swigregister(b2MotorJoint)
class b2MouseJointDef(b2JointDef):
r"""Mouse joint definition. This requires a world target point, tuning parameters, and the time step."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2MouseJointDef_swiginit(self,_Box2D.new_b2MouseJointDef())
_init_kwargs(self, **kwargs)
target = property(_Box2D.b2MouseJointDef_target_get, _Box2D.b2MouseJointDef_target_set, doc=r"""target : b2Vec2""")
maxForce = property(_Box2D.b2MouseJointDef_maxForce_get, _Box2D.b2MouseJointDef_maxForce_set, doc=r"""maxForce : float32""")
frequencyHz = property(_Box2D.b2MouseJointDef_frequencyHz_get, _Box2D.b2MouseJointDef_frequencyHz_set, doc=r"""frequencyHz : float32""")
dampingRatio = property(_Box2D.b2MouseJointDef_dampingRatio_get, _Box2D.b2MouseJointDef_dampingRatio_set, doc=r"""dampingRatio : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2MouseJointDef___hash__)
def __repr__(self):
return _format_repr(self)
__swig_destroy__ = _Box2D.delete_b2MouseJointDef
# Register b2MouseJointDef in _Box2D:
_Box2D.b2MouseJointDef_swigregister(b2MouseJointDef)
class b2MouseJoint(b2Joint):
r"""A mouse joint is used to make a point on a body track a specified world point. This a soft constraint with a maximum force. This allows the constraint to stretch and without applying huge forces. NOTE: this joint is not documented in the manual because it was developed to be used in the testbed. If you want to learn how to use the mouse joint, look at the testbed."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__SetTarget = _swig_new_instance_method(_Box2D.b2MouseJoint___SetTarget)
__GetTarget = _swig_new_instance_method(_Box2D.b2MouseJoint___GetTarget)
__SetMaxForce = _swig_new_instance_method(_Box2D.b2MouseJoint___SetMaxForce)
__GetMaxForce = _swig_new_instance_method(_Box2D.b2MouseJoint___GetMaxForce)
__SetFrequency = _swig_new_instance_method(_Box2D.b2MouseJoint___SetFrequency)
__GetFrequency = _swig_new_instance_method(_Box2D.b2MouseJoint___GetFrequency)
__SetDampingRatio = _swig_new_instance_method(_Box2D.b2MouseJoint___SetDampingRatio)
__GetDampingRatio = _swig_new_instance_method(_Box2D.b2MouseJoint___GetDampingRatio)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2MouseJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
maxForce = property(__GetMaxForce, __SetMaxForce)
frequency = property(__GetFrequency, __SetFrequency)
dampingRatio = property(__GetDampingRatio, __SetDampingRatio)
target = property(__GetTarget, __SetTarget)
__swig_destroy__ = _Box2D.delete_b2MouseJoint
# Register b2MouseJoint in _Box2D:
_Box2D.b2MouseJoint_swigregister(b2MouseJoint)
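# Illustrative usage sketch (not part of the generated bindings): dragging a
# dynamic body toward a pointer position. bodyA is conventionally a static
# ground body; the joint's target is updated as the pointer moves. mass is a
# b2Body property from these bindings; the force scale is arbitrary.
def _example_mouse_joint(world, ground, body, cursor_position):
    joint = world.CreateMouseJoint(
        bodyA=ground,
        bodyB=body,
        target=cursor_position,
        maxForce=1000.0 * body.mass,
    )
    # Later, on pointer motion, retarget the joint:
    joint.target = cursor_position
    return joint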
class b2PrismaticJointDef(b2JointDef):
r"""
Prismatic joint definition. This requires defining a line of motion using an axis and an anchor point. The definition uses local anchor points and a local axis so that the initial configuration can violate the constraint slightly. The joint translation is zero when the local anchor points coincide in world space. Using local anchors and a local axis helps when saving and loading a game.
WARNING:
    At least one body should be dynamic with a non-fixed rotation.
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2PrismaticJointDef_swiginit(self,_Box2D.new_b2PrismaticJointDef())
_init_jointdef_kwargs(self, **kwargs)
if self.bodyA and self.bodyB and 'referenceAngle' not in kwargs:
self.referenceAngle = self.bodyB.angle - self.bodyA.angle
Initialize = _swig_new_instance_method(_Box2D.b2PrismaticJointDef_Initialize)
localAnchorA = property(_Box2D.b2PrismaticJointDef_localAnchorA_get, _Box2D.b2PrismaticJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2PrismaticJointDef_localAnchorB_get, _Box2D.b2PrismaticJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
localAxisA = property(_Box2D.b2PrismaticJointDef_localAxisA_get, _Box2D.b2PrismaticJointDef_localAxisA_set, doc=r"""localAxisA : b2Vec2""")
referenceAngle = property(_Box2D.b2PrismaticJointDef_referenceAngle_get, _Box2D.b2PrismaticJointDef_referenceAngle_set, doc=r"""referenceAngle : float32""")
enableLimit = property(_Box2D.b2PrismaticJointDef_enableLimit_get, _Box2D.b2PrismaticJointDef_enableLimit_set, doc=r"""enableLimit : bool""")
lowerTranslation = property(_Box2D.b2PrismaticJointDef_lowerTranslation_get, _Box2D.b2PrismaticJointDef_lowerTranslation_set, doc=r"""lowerTranslation : float32""")
upperTranslation = property(_Box2D.b2PrismaticJointDef_upperTranslation_get, _Box2D.b2PrismaticJointDef_upperTranslation_set, doc=r"""upperTranslation : float32""")
enableMotor = property(_Box2D.b2PrismaticJointDef_enableMotor_get, _Box2D.b2PrismaticJointDef_enableMotor_set, doc=r"""enableMotor : bool""")
maxMotorForce = property(_Box2D.b2PrismaticJointDef_maxMotorForce_get, _Box2D.b2PrismaticJointDef_maxMotorForce_set, doc=r"""maxMotorForce : float32""")
motorSpeed = property(_Box2D.b2PrismaticJointDef_motorSpeed_get, _Box2D.b2PrismaticJointDef_motorSpeed_set, doc=r"""motorSpeed : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2PrismaticJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __set_anchor(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.localAnchorB=self.bodyB.GetLocalPoint(value)
def __get_anchor(self):
if self.bodyA:
return self.bodyA.GetWorldPoint(self.localAnchorA)
if self.bodyB:
return self.bodyB.GetWorldPoint(self.localAnchorB)
raise ValueError('Neither body was set; unable to get world point.')
def __set_axis(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
self.localAxisA=self.bodyA.GetLocalVector(value)
def __get_axis(self):
if not self.bodyA:
raise ValueError('Body A unset; unable to get world vector.')
return self.bodyA.GetWorldVector(self.localAxisA)
anchor = property(__get_anchor, __set_anchor,
doc="""The anchor in world coordinates.
Getting the property depends on either bodyA and localAnchorA or
bodyB and localAnchorB.
Setting the property requires that both bodies be set.""")
axis = property(__get_axis, __set_axis,
doc="""The world translation axis on bodyA.
Getting the property depends on bodyA and localAxisA.
Setting the property requires that bodyA be set.""")
__swig_destroy__ = _Box2D.delete_b2PrismaticJointDef
# Register b2PrismaticJointDef in _Box2D:
_Box2D.b2PrismaticJointDef_swigregister(b2PrismaticJointDef)
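# Illustrative usage sketch (not part of the generated bindings): a vertical
# slider with limits and a motor, using the world-space anchor and axis
# properties defined on the joint definition above. worldCenter is a b2Body
# property; the limit, speed, and force values are arbitrary.
def _example_prismatic_joint(world, body_a, body_b):
    return world.CreatePrismaticJoint(
        bodyA=body_a,
        bodyB=body_b,
        anchor=body_b.worldCenter,
        axis=(0, 1),
        lowerTranslation=-1.0,
        upperTranslation=1.0,
        enableLimit=True,
        motorSpeed=0.5,
        maxMotorForce=50.0,
        enableMotor=True,
    )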
class b2PrismaticJoint(b2Joint):
r"""A prismatic joint. This joint provides one degree of freedom: translation along an axis fixed in body1. Relative rotation is prevented. You can use a joint limit to restrict the range of motion and a joint motor to drive the motion or to model joint friction."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2PrismaticJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2PrismaticJoint_GetLocalAnchorB)
GetLocalAxisA = _swig_new_instance_method(_Box2D.b2PrismaticJoint_GetLocalAxisA)
GetReferenceAngle = _swig_new_instance_method(_Box2D.b2PrismaticJoint_GetReferenceAngle)
__GetJointTranslation = _swig_new_instance_method(_Box2D.b2PrismaticJoint___GetJointTranslation)
__GetJointSpeed = _swig_new_instance_method(_Box2D.b2PrismaticJoint___GetJointSpeed)
__IsLimitEnabled = _swig_new_instance_method(_Box2D.b2PrismaticJoint___IsLimitEnabled)
__EnableLimit = _swig_new_instance_method(_Box2D.b2PrismaticJoint___EnableLimit)
__GetLowerLimit = _swig_new_instance_method(_Box2D.b2PrismaticJoint___GetLowerLimit)
__GetUpperLimit = _swig_new_instance_method(_Box2D.b2PrismaticJoint___GetUpperLimit)
SetLimits = _swig_new_instance_method(_Box2D.b2PrismaticJoint_SetLimits)
__IsMotorEnabled = _swig_new_instance_method(_Box2D.b2PrismaticJoint___IsMotorEnabled)
__EnableMotor = _swig_new_instance_method(_Box2D.b2PrismaticJoint___EnableMotor)
__SetMotorSpeed = _swig_new_instance_method(_Box2D.b2PrismaticJoint___SetMotorSpeed)
__GetMotorSpeed = _swig_new_instance_method(_Box2D.b2PrismaticJoint___GetMotorSpeed)
__SetMaxMotorForce = _swig_new_instance_method(_Box2D.b2PrismaticJoint___SetMaxMotorForce)
__GetMaxMotorForce = _swig_new_instance_method(_Box2D.b2PrismaticJoint___GetMaxMotorForce)
GetMotorForce = _swig_new_instance_method(_Box2D.b2PrismaticJoint_GetMotorForce)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2PrismaticJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
motorSpeed = property(__GetMotorSpeed, __SetMotorSpeed)
motorEnabled = property(__IsMotorEnabled, __EnableMotor)
limitEnabled = property(__IsLimitEnabled, __EnableLimit)
upperLimit = property(__GetUpperLimit, lambda self, v: self.SetLimits(self.lowerLimit, v))
lowerLimit = property(__GetLowerLimit, lambda self, v: self.SetLimits(v, self.upperLimit))
limits = property(lambda self: (self.lowerLimit, self.upperLimit), lambda self, v: self.SetLimits(*v) )
maxMotorForce = property(__GetMaxMotorForce, __SetMaxMotorForce)
# Read-only
translation = property(__GetJointTranslation, None)
speed = property(__GetJointSpeed, None)
__swig_destroy__ = _Box2D.delete_b2PrismaticJoint
# Register b2PrismaticJoint in _Box2D:
_Box2D.b2PrismaticJoint_swigregister(b2PrismaticJoint)
class b2PulleyJointDef(b2JointDef):
r"""Pulley joint definition. This requires two ground anchors, two dynamic body anchor points, max lengths for each side, and a pulley ratio."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2PulleyJointDef_swiginit(self,_Box2D.new_b2PulleyJointDef())
_init_jointdef_kwargs(self, **kwargs)
self.__init_pulley__(**kwargs)
def __init_pulley__(self, anchorA=None, anchorB=None, lengthA=None, lengthB=None, groundAnchorA=None, groundAnchorB=None, maxLengthA=None, maxLengthB=None, ratio=None, **kwargs):
lengthA_set, lengthB_set = False, False
if anchorA is not None or anchorB is not None:
# Some undoing -- if the user specified the length, we might
# have overwritten it, so reset it.
if lengthA is not None:
self.lengthA = lengthA
lengthA_set = True
if lengthB is not None:
self.lengthB = lengthB
lengthB_set = True
if anchorA is not None and groundAnchorA is not None and lengthA is None:
d1 = self.anchorA - self.groundAnchorA
self.lengthA = d1.length
lengthA_set = True
if anchorB is not None and groundAnchorB is not None and lengthB is None:
d2 = self.anchorB - self.groundAnchorB
self.lengthB = d2.length
lengthB_set=True
if ratio is not None:
# Ratio too small?
assert(self.ratio > globals()['b2_epsilon'])
if lengthA_set and lengthB_set and maxLengthA is None and maxLengthB is None:
C = self.lengthA + self.ratio * self.lengthB
self.maxLengthA = C - self.ratio * b2_minPulleyLength
self.maxLengthB = (C - b2_minPulleyLength) / self.ratio
Initialize = _swig_new_instance_method(_Box2D.b2PulleyJointDef_Initialize)
groundAnchorA = property(_Box2D.b2PulleyJointDef_groundAnchorA_get, _Box2D.b2PulleyJointDef_groundAnchorA_set, doc=r"""groundAnchorA : b2Vec2""")
groundAnchorB = property(_Box2D.b2PulleyJointDef_groundAnchorB_get, _Box2D.b2PulleyJointDef_groundAnchorB_set, doc=r"""groundAnchorB : b2Vec2""")
localAnchorA = property(_Box2D.b2PulleyJointDef_localAnchorA_get, _Box2D.b2PulleyJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2PulleyJointDef_localAnchorB_get, _Box2D.b2PulleyJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
lengthA = property(_Box2D.b2PulleyJointDef_lengthA_get, _Box2D.b2PulleyJointDef_lengthA_set, doc=r"""lengthA : float32""")
lengthB = property(_Box2D.b2PulleyJointDef_lengthB_get, _Box2D.b2PulleyJointDef_lengthB_set, doc=r"""lengthB : float32""")
ratio = property(_Box2D.b2PulleyJointDef_ratio_get, _Box2D.b2PulleyJointDef_ratio_set, doc=r"""ratio : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2PulleyJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __update_length(self):
if self.bodyA:
d1 = self.anchorA - self.groundAnchorA
self.lengthA = d1.length
if self.bodyB:
d1 = self.anchorB - self.groundAnchorB
self.lengthB = d1.length
def __set_anchorA(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.__update_length()
def __set_anchorB(self, value):
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorB=self.bodyB.GetLocalPoint(value)
self.__update_length()
def __get_anchorA(self):
if not self.bodyA:
raise ValueError('bodyA not set.')
return self.bodyA.GetWorldPoint(self.localAnchorA)
def __get_anchorB(self):
if not self.bodyB:
raise ValueError('bodyB not set.')
return self.bodyB.GetWorldPoint(self.localAnchorB)
anchorA = property(__get_anchorA, __set_anchorA,
doc="""Body A's anchor in world coordinates.
Getting the property depends on both bodyA and localAnchorA.
Setting the property requires that bodyA be set.""")
anchorB = property(__get_anchorB, __set_anchorB,
doc="""Body B's anchor in world coordinates.
Getting the property depends on both bodyB and localAnchorB.
Setting the property requires that bodyB be set.""")
__swig_destroy__ = _Box2D.delete_b2PulleyJointDef
# Register b2PulleyJointDef in _Box2D:
_Box2D.b2PulleyJointDef_swigregister(b2PulleyJointDef)
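# Illustrative usage sketch (not part of the generated bindings): a pulley
# joining two hanging bodies through two fixed ground anchor points. Lengths
# and maximum lengths are derived by the definition logic above when omitted;
# the anchor coordinates here are arbitrary.
def _example_pulley_joint(world, body_a, body_b):
    return world.CreatePulleyJoint(
        bodyA=body_a,
        bodyB=body_b,
        groundAnchorA=(-2, 10),
        groundAnchorB=(2, 10),
        anchorA=body_a.worldCenter,
        anchorB=body_b.worldCenter,
        ratio=1.0,
    )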
b2_minPulleyLength = b2Globals.b2_minPulleyLength
class b2PulleyJoint(b2Joint):
r"""The pulley joint is connected to two bodies and two fixed ground points. The pulley supports a ratio such that: length1 + ratio * length2 <= constant Yes, the force transmitted is scaled by the ratio. The pulley also enforces a maximum length limit on both sides. This is useful to prevent one side of the pulley hitting the top."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__GetGroundAnchorA = _swig_new_instance_method(_Box2D.b2PulleyJoint___GetGroundAnchorA)
__GetGroundAnchorB = _swig_new_instance_method(_Box2D.b2PulleyJoint___GetGroundAnchorB)
__GetLengthA = _swig_new_instance_method(_Box2D.b2PulleyJoint___GetLengthA)
__GetLengthB = _swig_new_instance_method(_Box2D.b2PulleyJoint___GetLengthB)
__GetRatio = _swig_new_instance_method(_Box2D.b2PulleyJoint___GetRatio)
GetCurrentLengthA = _swig_new_instance_method(_Box2D.b2PulleyJoint_GetCurrentLengthA)
GetCurrentLengthB = _swig_new_instance_method(_Box2D.b2PulleyJoint_GetCurrentLengthB)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2PulleyJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-only
groundAnchorB = property(__GetGroundAnchorB, None)
groundAnchorA = property(__GetGroundAnchorA, None)
ratio = property(__GetRatio, None)
lengthB = length2 = property(__GetLengthB, None)
lengthA = length1 = property(__GetLengthA, None)
__swig_destroy__ = _Box2D.delete_b2PulleyJoint
# Register b2PulleyJoint in _Box2D:
_Box2D.b2PulleyJoint_swigregister(b2PulleyJoint)
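# Illustrative usage sketch (hypothetical; not part of the generated wrapper).
# It shows how the b2PulleyJointDef properties above fit together: b2World,
# CreateDynamicBody and CreateJoint are the usual pybox2d helpers defined
# elsewhere in this module, and all coordinates/ratio values are made up.
def _example_pulley_joint():
    world = b2World(gravity=(0, -10))
    left = world.CreateDynamicBody(position=(-2, 5))
    right = world.CreateDynamicBody(position=(2, 5))
    pulley = b2PulleyJointDef()
    pulley.bodyA, pulley.bodyB = left, right
    pulley.groundAnchorA, pulley.groundAnchorB = (-2, 10), (2, 10)
    # Setting the anchors recomputes lengthA/lengthB from the ground anchors.
    pulley.anchorA, pulley.anchorB = left.position, right.position
    pulley.ratio = 2.0  # joint enforces lengthA + ratio * lengthB <= constant
    return world.CreateJoint(pulley)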
class b2RevoluteJointDef(b2JointDef):
r"""Revolute joint definition. This requires defining an anchor point where the bodies are joined. The definition uses local anchor points so that the initial configuration can violate the constraint slightly. You also need to specify the initial relative angle for joint limits. This helps when saving and loading a game. The local anchor points are measured from the body's origin rather than the center of mass because: 1. you might not know where the center of mass will be. 2. if you add/remove shapes from a body and recompute the mass, the joints will be broken."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2RevoluteJointDef_swiginit(self,_Box2D.new_b2RevoluteJointDef())
_init_jointdef_kwargs(self, **kwargs)
if self.bodyA and self.bodyB and 'referenceAngle' not in kwargs:
self.referenceAngle = self.bodyB.angle - self.bodyA.angle
Initialize = _swig_new_instance_method(_Box2D.b2RevoluteJointDef_Initialize)
localAnchorA = property(_Box2D.b2RevoluteJointDef_localAnchorA_get, _Box2D.b2RevoluteJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2RevoluteJointDef_localAnchorB_get, _Box2D.b2RevoluteJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
referenceAngle = property(_Box2D.b2RevoluteJointDef_referenceAngle_get, _Box2D.b2RevoluteJointDef_referenceAngle_set, doc=r"""referenceAngle : float32""")
enableLimit = property(_Box2D.b2RevoluteJointDef_enableLimit_get, _Box2D.b2RevoluteJointDef_enableLimit_set, doc=r"""enableLimit : bool""")
lowerAngle = property(_Box2D.b2RevoluteJointDef_lowerAngle_get, _Box2D.b2RevoluteJointDef_lowerAngle_set, doc=r"""lowerAngle : float32""")
upperAngle = property(_Box2D.b2RevoluteJointDef_upperAngle_get, _Box2D.b2RevoluteJointDef_upperAngle_set, doc=r"""upperAngle : float32""")
enableMotor = property(_Box2D.b2RevoluteJointDef_enableMotor_get, _Box2D.b2RevoluteJointDef_enableMotor_set, doc=r"""enableMotor : bool""")
motorSpeed = property(_Box2D.b2RevoluteJointDef_motorSpeed_get, _Box2D.b2RevoluteJointDef_motorSpeed_set, doc=r"""motorSpeed : float32""")
maxMotorTorque = property(_Box2D.b2RevoluteJointDef_maxMotorTorque_get, _Box2D.b2RevoluteJointDef_maxMotorTorque_set, doc=r"""maxMotorTorque : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RevoluteJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __set_anchor(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.localAnchorB=self.bodyB.GetLocalPoint(value)
def __get_anchor(self):
if self.bodyA:
return self.bodyA.GetWorldPoint(self.localAnchorA)
if self.bodyB:
return self.bodyB.GetWorldPoint(self.localAnchorB)
raise ValueError('Neither body was set; unable to get world point.')
anchor = property(__get_anchor, __set_anchor,
doc="""The anchor in world coordinates.
Getting the property depends on either bodyA and localAnchorA or
bodyB and localAnchorB.
Setting the property requires that both bodies be set.""")
__swig_destroy__ = _Box2D.delete_b2RevoluteJointDef
# Register b2RevoluteJointDef in _Box2D:
_Box2D.b2RevoluteJointDef_swigregister(b2RevoluteJointDef)
class b2RevoluteJoint(b2Joint):
r"""A revolute joint constrains two bodies to share a common point while they are free to rotate about the point. The relative rotation about the shared point is the joint angle. You can limit the relative rotation with a joint limit that specifies a lower and upper angle. You can use a motor to drive the relative rotation about the shared point. A maximum motor torque is provided so that infinite forces are not generated."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2RevoluteJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2RevoluteJoint_GetLocalAnchorB)
GetReferenceAngle = _swig_new_instance_method(_Box2D.b2RevoluteJoint_GetReferenceAngle)
__GetJointAngle = _swig_new_instance_method(_Box2D.b2RevoluteJoint___GetJointAngle)
__GetJointSpeed = _swig_new_instance_method(_Box2D.b2RevoluteJoint___GetJointSpeed)
__IsLimitEnabled = _swig_new_instance_method(_Box2D.b2RevoluteJoint___IsLimitEnabled)
__EnableLimit = _swig_new_instance_method(_Box2D.b2RevoluteJoint___EnableLimit)
__GetLowerLimit = _swig_new_instance_method(_Box2D.b2RevoluteJoint___GetLowerLimit)
__GetUpperLimit = _swig_new_instance_method(_Box2D.b2RevoluteJoint___GetUpperLimit)
SetLimits = _swig_new_instance_method(_Box2D.b2RevoluteJoint_SetLimits)
__IsMotorEnabled = _swig_new_instance_method(_Box2D.b2RevoluteJoint___IsMotorEnabled)
__EnableMotor = _swig_new_instance_method(_Box2D.b2RevoluteJoint___EnableMotor)
__SetMotorSpeed = _swig_new_instance_method(_Box2D.b2RevoluteJoint___SetMotorSpeed)
__GetMotorSpeed = _swig_new_instance_method(_Box2D.b2RevoluteJoint___GetMotorSpeed)
__SetMaxMotorTorque = _swig_new_instance_method(_Box2D.b2RevoluteJoint___SetMaxMotorTorque)
GetMaxMotorTorque = _swig_new_instance_method(_Box2D.b2RevoluteJoint_GetMaxMotorTorque)
GetMotorTorque = _swig_new_instance_method(_Box2D.b2RevoluteJoint_GetMotorTorque)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RevoluteJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
motorSpeed = property(__GetMotorSpeed, __SetMotorSpeed)
upperLimit = property(__GetUpperLimit, lambda self, v: self.SetLimits(self.lowerLimit, v))
lowerLimit = property(__GetLowerLimit, lambda self, v: self.SetLimits(v, self.upperLimit))
limits = property(lambda self: (self.lowerLimit, self.upperLimit), lambda self, v: self.SetLimits(*v) )
motorEnabled = property(__IsMotorEnabled, __EnableMotor)
limitEnabled = property(__IsLimitEnabled, __EnableLimit)
# Read-only
angle = property(__GetJointAngle, None)
speed = property(__GetJointSpeed, None)
# Write-only
maxMotorTorque = property(None, __SetMaxMotorTorque)
__swig_destroy__ = _Box2D.delete_b2RevoluteJoint
# Register b2RevoluteJoint in _Box2D:
_Box2D.b2RevoluteJoint_swigregister(b2RevoluteJoint)
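# Illustrative usage sketch (hypothetical; not part of the generated wrapper).
# A motorised hinge built from the b2RevoluteJointDef/b2RevoluteJoint wrappers
# above; world/body creation uses the standard pybox2d helpers and all numeric
# values are arbitrary.
def _example_revolute_motor():
    world = b2World(gravity=(0, -10))
    base = world.CreateStaticBody(position=(0, 0))
    arm = world.CreateDynamicBody(position=(0, 2))
    jd = b2RevoluteJointDef(bodyA=base, bodyB=arm,
                            enableMotor=True, motorSpeed=1.0, maxMotorTorque=50.0,
                            enableLimit=True, lowerAngle=-0.5, upperAngle=0.5)
    jd.anchor = base.position          # world-space anchor; needs both bodies set
    joint = world.CreateJoint(jd)
    world.Step(1.0 / 60, 8, 3)         # advance the simulation one frame
    return joint.angle, joint.speed    # read-only properties exposed above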
class b2RopeJointDef(b2JointDef):
r"""Rope joint definition. This requires two body anchor points and a maximum lengths. Note: by default the connected objects will not collide. see collideConnected in b2JointDef."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2RopeJointDef_swiginit(self,_Box2D.new_b2RopeJointDef())
_init_jointdef_kwargs(self, **kwargs)
localAnchorA = property(_Box2D.b2RopeJointDef_localAnchorA_get, _Box2D.b2RopeJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2RopeJointDef_localAnchorB_get, _Box2D.b2RopeJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
maxLength = property(_Box2D.b2RopeJointDef_maxLength_get, _Box2D.b2RopeJointDef_maxLength_set, doc=r"""maxLength : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RopeJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __set_anchorA(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
def __set_anchorB(self, value):
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorB=self.bodyB.GetLocalPoint(value)
def __get_anchorA(self):
if not self.bodyA:
raise ValueError('bodyA not set.')
return self.bodyA.GetWorldPoint(self.localAnchorA)
def __get_anchorB(self):
if not self.bodyB:
raise ValueError('bodyB not set.')
return self.bodyB.GetWorldPoint(self.localAnchorB)
anchorA = property(__get_anchorA, __set_anchorA,
doc="""Body A's anchor in world coordinates.
Getting the property depends on both bodyA and localAnchorA.
Setting the property requires that bodyA be set.""")
anchorB = property(__get_anchorB, __set_anchorB,
doc="""Body B's anchor in world coordinates.
Getting the property depends on both bodyB and localAnchorB.
Setting the property requires that bodyB be set.""")
__swig_destroy__ = _Box2D.delete_b2RopeJointDef
# Register b2RopeJointDef in _Box2D:
_Box2D.b2RopeJointDef_swigregister(b2RopeJointDef)
class b2RopeJoint(b2Joint):
r"""A rope joint enforces a maximum distance between two points on two bodies. It has no other effect. Warning: if you attempt to change the maximum length during the simulation you will get some non-physical behavior. A model that would allow you to dynamically modify the length would have some sponginess, so I chose not to implement it that way. See b2DistanceJointif you want to dynamically control length."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2RopeJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2RopeJoint_GetLocalAnchorB)
SetMaxLength = _swig_new_instance_method(_Box2D.b2RopeJoint_SetMaxLength)
__GetMaxLength = _swig_new_instance_method(_Box2D.b2RopeJoint___GetMaxLength)
__GetLimitState = _swig_new_instance_method(_Box2D.b2RopeJoint___GetLimitState)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2RopeJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-only properties
maxLength = property(__GetMaxLength, None)
limitState = property(__GetLimitState, None)
# Read-write properties
__swig_destroy__ = _Box2D.delete_b2RopeJoint
# Register b2RopeJoint in _Box2D:
_Box2D.b2RopeJoint_swigregister(b2RopeJoint)
class b2WeldJointDef(b2JointDef):
r"""Weld joint definition. You need to specify local anchor points where they are attached and the relative body angle. The position of the anchor points is important for computing the reaction torque."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2WeldJointDef_swiginit(self,_Box2D.new_b2WeldJointDef())
_init_jointdef_kwargs(self, **kwargs)
if self.bodyA and self.bodyB and 'referenceAngle' not in kwargs:
self.referenceAngle = self.bodyB.angle - self.bodyA.angle
Initialize = _swig_new_instance_method(_Box2D.b2WeldJointDef_Initialize)
localAnchorA = property(_Box2D.b2WeldJointDef_localAnchorA_get, _Box2D.b2WeldJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2WeldJointDef_localAnchorB_get, _Box2D.b2WeldJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
referenceAngle = property(_Box2D.b2WeldJointDef_referenceAngle_get, _Box2D.b2WeldJointDef_referenceAngle_set, doc=r"""referenceAngle : float32""")
frequencyHz = property(_Box2D.b2WeldJointDef_frequencyHz_get, _Box2D.b2WeldJointDef_frequencyHz_set, doc=r"""frequencyHz : float32""")
dampingRatio = property(_Box2D.b2WeldJointDef_dampingRatio_get, _Box2D.b2WeldJointDef_dampingRatio_set, doc=r"""dampingRatio : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2WeldJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __set_anchor(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.localAnchorB=self.bodyB.GetLocalPoint(value)
def __get_anchor(self):
if self.bodyA:
return self.bodyA.GetWorldPoint(self.localAnchorA)
if self.bodyB:
return self.bodyB.GetWorldPoint(self.localAnchorB)
raise ValueError('Neither body was set; unable to get world point.')
anchor = property(__get_anchor, __set_anchor,
doc="""The anchor in world coordinates.
Getting the property depends on either bodyA and localAnchorA or
bodyB and localAnchorB.
Setting the property requires that both bodies be set.""")
__swig_destroy__ = _Box2D.delete_b2WeldJointDef
# Register b2WeldJointDef in _Box2D:
_Box2D.b2WeldJointDef_swigregister(b2WeldJointDef)
class b2WeldJoint(b2Joint):
r"""A weld joint essentially glues two bodies together. A weld joint may distort somewhat because the island constraint solver is approximate."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2WeldJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2WeldJoint_GetLocalAnchorB)
GetReferenceAngle = _swig_new_instance_method(_Box2D.b2WeldJoint_GetReferenceAngle)
SetFrequency = _swig_new_instance_method(_Box2D.b2WeldJoint_SetFrequency)
GetFrequency = _swig_new_instance_method(_Box2D.b2WeldJoint_GetFrequency)
SetDampingRatio = _swig_new_instance_method(_Box2D.b2WeldJoint_SetDampingRatio)
GetDampingRatio = _swig_new_instance_method(_Box2D.b2WeldJoint_GetDampingRatio)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2WeldJoint___hash__)
def __repr__(self):
return _format_repr(self)
__swig_destroy__ = _Box2D.delete_b2WeldJoint
# Register b2WeldJoint in _Box2D:
_Box2D.b2WeldJoint_swigregister(b2WeldJoint)
class b2WheelJointDef(b2JointDef):
r"""Line joint definition. This requires defining a line of motion using an axis and an anchor point. The definition uses local anchor points and a local axis so that the initial configuration can violate the constraint slightly. The joint translation is zero when the local anchor points coincide in world space. Using local anchors and a local axis helps when saving and loading a game."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, **kwargs):
_Box2D.b2WheelJointDef_swiginit(self,_Box2D.new_b2WheelJointDef())
_init_jointdef_kwargs(self, **kwargs)
Initialize = _swig_new_instance_method(_Box2D.b2WheelJointDef_Initialize)
localAnchorA = property(_Box2D.b2WheelJointDef_localAnchorA_get, _Box2D.b2WheelJointDef_localAnchorA_set, doc=r"""localAnchorA : b2Vec2""")
localAnchorB = property(_Box2D.b2WheelJointDef_localAnchorB_get, _Box2D.b2WheelJointDef_localAnchorB_set, doc=r"""localAnchorB : b2Vec2""")
localAxisA = property(_Box2D.b2WheelJointDef_localAxisA_get, _Box2D.b2WheelJointDef_localAxisA_set, doc=r"""localAxisA : b2Vec2""")
enableMotor = property(_Box2D.b2WheelJointDef_enableMotor_get, _Box2D.b2WheelJointDef_enableMotor_set, doc=r"""enableMotor : bool""")
maxMotorTorque = property(_Box2D.b2WheelJointDef_maxMotorTorque_get, _Box2D.b2WheelJointDef_maxMotorTorque_set, doc=r"""maxMotorTorque : float32""")
motorSpeed = property(_Box2D.b2WheelJointDef_motorSpeed_get, _Box2D.b2WheelJointDef_motorSpeed_set, doc=r"""motorSpeed : float32""")
frequencyHz = property(_Box2D.b2WheelJointDef_frequencyHz_get, _Box2D.b2WheelJointDef_frequencyHz_set, doc=r"""frequencyHz : float32""")
dampingRatio = property(_Box2D.b2WheelJointDef_dampingRatio_get, _Box2D.b2WheelJointDef_dampingRatio_set, doc=r"""dampingRatio : float32""")
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2WheelJointDef___hash__)
def __repr__(self):
return _format_repr(self)
def __set_anchor(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
if not self.bodyB:
raise ValueError('bodyB not set.')
self.localAnchorA=self.bodyA.GetLocalPoint(value)
self.localAnchorB=self.bodyB.GetLocalPoint(value)
def __get_anchor(self):
if self.bodyA:
return self.bodyA.GetWorldPoint(self.localAnchorA)
if self.bodyB:
return self.bodyB.GetWorldPoint(self.localAnchorB)
raise ValueError('Neither body was set; unable to get world point.')
def __set_axis(self, value):
if not self.bodyA:
raise ValueError('bodyA not set.')
self.localAxisA=self.bodyA.GetLocalVector(value)
def __get_axis(self):
if self.bodyA:
return self.bodyA.GetWorldVector(self.localAxisA)
raise ValueError('Body A unset; unable to get world vector.')
anchor = property(__get_anchor, __set_anchor,
doc="""The anchor in world coordinates.
Getting the property depends on either bodyA and localAnchorA or
bodyB and localAnchorB.
Setting the property requires that both bodies be set.""")
axis = property(__get_axis, __set_axis,
doc="""The world translation axis on bodyA.
Getting the property depends on bodyA and localAxisA.
Setting the property requires that bodyA be set.""")
__swig_destroy__ = _Box2D.delete_b2WheelJointDef
# Register b2WheelJointDef in _Box2D:
_Box2D.b2WheelJointDef_swigregister(b2WheelJointDef)
class b2WheelJoint(b2Joint):
r"""A line joint. This joint provides two degrees of freedom: translation along an axis fixed in body1 and rotation in the plane. You can use a joint limit to restrict the range of motion and a joint motor to drive the rotation or to model rotational friction. This joint is designed for vehicle suspensions."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
GetLocalAnchorA = _swig_new_instance_method(_Box2D.b2WheelJoint_GetLocalAnchorA)
GetLocalAnchorB = _swig_new_instance_method(_Box2D.b2WheelJoint_GetLocalAnchorB)
GetLocalAxisA = _swig_new_instance_method(_Box2D.b2WheelJoint_GetLocalAxisA)
__GetJointTranslation = _swig_new_instance_method(_Box2D.b2WheelJoint___GetJointTranslation)
__GetJointSpeed = _swig_new_instance_method(_Box2D.b2WheelJoint___GetJointSpeed)
__IsMotorEnabled = _swig_new_instance_method(_Box2D.b2WheelJoint___IsMotorEnabled)
__EnableMotor = _swig_new_instance_method(_Box2D.b2WheelJoint___EnableMotor)
__SetMotorSpeed = _swig_new_instance_method(_Box2D.b2WheelJoint___SetMotorSpeed)
__GetMotorSpeed = _swig_new_instance_method(_Box2D.b2WheelJoint___GetMotorSpeed)
__SetMaxMotorTorque = _swig_new_instance_method(_Box2D.b2WheelJoint___SetMaxMotorTorque)
__GetMaxMotorTorque = _swig_new_instance_method(_Box2D.b2WheelJoint___GetMaxMotorTorque)
GetMotorTorque = _swig_new_instance_method(_Box2D.b2WheelJoint_GetMotorTorque)
__SetSpringFrequencyHz = _swig_new_instance_method(_Box2D.b2WheelJoint___SetSpringFrequencyHz)
__GetSpringFrequencyHz = _swig_new_instance_method(_Box2D.b2WheelJoint___GetSpringFrequencyHz)
__SetSpringDampingRatio = _swig_new_instance_method(_Box2D.b2WheelJoint___SetSpringDampingRatio)
__GetSpringDampingRatio = _swig_new_instance_method(_Box2D.b2WheelJoint___GetSpringDampingRatio)
__dir__ = _dir_filter
__hash__ = _swig_new_instance_method(_Box2D.b2WheelJoint___hash__)
def __repr__(self):
return _format_repr(self)
# Read-write properties
motorSpeed = property(__GetMotorSpeed, __SetMotorSpeed)
motorEnabled = property(__IsMotorEnabled, __EnableMotor)
maxMotorTorque = property(__GetMaxMotorTorque, __SetMaxMotorTorque)
    springFrequencyHz = property(__GetSpringFrequencyHz, __SetSpringFrequencyHz)
    springDampingRatio = property(__GetSpringDampingRatio, __SetSpringDampingRatio)
# Read-only
speed = property(__GetJointSpeed, None)
translation = property(__GetJointTranslation, None)
__swig_destroy__ = _Box2D.delete_b2WheelJoint
# Register b2WheelJoint in _Box2D:
_Box2D.b2WheelJoint_swigregister(b2WheelJoint)
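# Illustrative usage sketch (hypothetical; not part of the generated wrapper).
# A suspension setup using the b2WheelJointDef/b2WheelJoint wrappers above;
# `world`, `chassis` and `wheel` are assumed to be an existing pybox2d world
# and bodies, and the tuning values are arbitrary.
def _example_wheel_suspension(world, chassis, wheel):
    jd = b2WheelJointDef(bodyA=chassis, bodyB=wheel,
                         frequencyHz=4.0, dampingRatio=0.7,
                         enableMotor=True, maxMotorTorque=20.0, motorSpeed=-5.0)
    jd.anchor = wheel.position   # world-space anchor; needs both bodies set
    jd.axis = (0, 1)             # world axis, stored as bodyA's local axis
    joint = world.CreateJoint(jd)
    joint.springFrequencyHz = 5.0    # the spring can be retuned after creation
    return joint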
# Backward-compatibility
b2LoopShape = b2ChainShape
# Initialize the alternative namespace b2.*, and clean-up the
# dir listing of Box2D by removing *_swigregister.
#
# To see what this is, try import Box2D; print(dir(Box2D.b2))
from . import b2
s=None
to_remove=[]
for s in locals():
if s.endswith('_swigregister'):
to_remove.append(s)
elif s!='b2' and s.startswith('b2'):
if s[2]=='_': # Covers b2_*
setattr(b2, s[3].lower() + s[4:], locals()[s])
else: # The other b2*
if s[3].isupper():
setattr(b2, s[2:], locals()[s])
else:
setattr(b2, s[2].lower() + s[3:], locals()[s])
for s in to_remove:
del locals()[s]
del s
del to_remove
| 44.116114 | 576 | 0.710994 |
4a1afe9231a6916c3253642209c929f2ccbc7138
| 25,335 |
py
|
Python
|
sktime/benchmarking/evaluation.py
|
brettkoonce/sktime
|
6336247bad0dac8692aa4b911c267f401dea4163
|
[
"BSD-3-Clause"
] | 1 |
2020-09-02T19:39:59.000Z
|
2020-09-02T19:39:59.000Z
|
sktime/benchmarking/evaluation.py
|
brettkoonce/sktime
|
6336247bad0dac8692aa4b911c267f401dea4163
|
[
"BSD-3-Clause"
] | 2 |
2020-04-20T12:26:42.000Z
|
2020-04-22T17:09:14.000Z
|
sktime/benchmarking/evaluation.py
|
brettkoonce/sktime
|
6336247bad0dac8692aa4b911c267f401dea4163
|
[
"BSD-3-Clause"
] | 1 |
2022-02-14T18:19:01.000Z
|
2022-02-14T18:19:01.000Z
|
__author__ = ["Viktor Kazakov", "Markus Löning", "Aaron Bostrom"]
__all__ = ["Evaluator"]
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import ranksums
from scipy.stats import ttest_ind
from sktime.benchmarking.base import BaseResults
from sktime.exceptions import NotEvaluatedError
plt.style.use("seaborn-ticks")
class Evaluator:
"""
Analyze results of machine learning experiments.
"""
def __init__(self, results):
if not isinstance(results, BaseResults):
raise ValueError("`results` must inherit from BaseResults")
self.results = results
self._metric_dicts = []
# preallocate dataframe for metrics
self._metrics = pd.DataFrame(
columns=["dataset", "strategy", "cv_fold"])
self._metrics_by_strategy_dataset = pd.DataFrame(
columns=["dataset", "strategy"])
self._metrics_by_strategy = pd.DataFrame(columns=["strategy"])
# keep track of metric names
self._metric_names = []
@property
def metric_names(self):
return self._metric_names
@property
def metrics(self):
self._check_is_evaluated()
return self._metrics
@property
def metrics_by_strategy(self):
self._check_is_evaluated()
return self._metrics_by_strategy
@property
def metrics_by_strategy_dataset(self):
self._check_is_evaluated()
return self._metrics_by_strategy_dataset
def evaluate(self, metric, train_or_test="test", cv_fold="all"):
"""
Calculates the average prediction error per estimator as well as the
prediction error achieved by each
estimator on individual datasets.
"""
# check input
if isinstance(cv_fold, int) and cv_fold >= 0:
cv_folds = [cv_fold] # if single fold, make iterable
elif cv_fold == "all":
cv_folds = np.arange(self.results.cv.get_n_splits())
if len(cv_folds) == 0:
                raise ValueError("Cross-validator returned zero splits; "
                                 "there are no predictions to evaluate.")
else:
raise ValueError(
f"`cv_fold` must be either positive integer (>=0) or 'all', "
f"but found: {type(cv_fold)}")
# load all predictions
for cv_fold in cv_folds:
for result in self.results.load_predictions(
cv_fold=cv_fold,
train_or_test=train_or_test):
# unwrap result object
strategy_name = result.strategy_name
dataset_name = result.dataset_name
# index = result.index
y_true = result.y_true
y_pred = result.y_pred
# y_proba = result.y_proba
# compute metric
mean, stderr = metric.compute(y_true, y_pred)
# store results
metric_dict = {
"dataset": dataset_name,
"strategy": strategy_name,
"cv_fold": cv_fold,
self._get_column_name(metric.name, suffix="mean"): mean,
self._get_column_name(metric.name, suffix="stderr"): stderr
}
self._metric_dicts.append(metric_dict)
# update metrics dataframe with computed metrics
metrics = pd.DataFrame(self._metric_dicts)
self._metrics = self._metrics.merge(metrics, how="outer")
# aggregate results
# aggregate over cv folds
metrics_by_strategy_dataset = self._metrics.groupby(
["dataset", "strategy"], as_index=False).agg(np.mean).drop(
columns="cv_fold")
self._metrics_by_strategy_dataset = \
self._metrics_by_strategy_dataset.merge(
metrics_by_strategy_dataset,
how="outer")
# aggregate over cv folds and datasets
metrics_by_strategy = metrics_by_strategy_dataset.groupby(
["strategy"], as_index=False).agg(np.mean)
self._metrics_by_strategy = self._metrics_by_strategy.merge(
metrics_by_strategy, how="outer")
# append metric names
self._metric_names.append(metric.name)
# return aggregated results
return self._metrics_by_strategy
def plot_boxplots(self, metric_name=None, **kwargs):
"""Box plot of metric"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
column = self._get_column_name(metric_name, suffix="mean")
fig, ax = plt.subplots(1)
self.metrics_by_strategy_dataset.boxplot(by="strategy", column=column,
grid=False, ax=ax, **kwargs)
ax.set(title=f"{metric_name} by strategy", xlabel="strategies",
ylabel=metric_name)
fig.suptitle(None)
plt.tight_layout()
return fig, ax
def rank(self, metric_name=None, ascending=False):
"""
Calculates the average ranks based on the performance of each
estimator on each dataset
"""
self._check_is_evaluated()
if not isinstance(ascending, bool):
raise ValueError(
f"`ascending` must be boolean, but found: {type(ascending)}")
metric_name = self._validate_metric_name(metric_name)
column = self._get_column_name(metric_name, suffix="mean")
ranked = (self.metrics_by_strategy_dataset
.loc[:, ["dataset", "strategy", column]]
.set_index("strategy")
.groupby("dataset")
.rank(ascending=ascending)
.reset_index()
.groupby("strategy")
.mean()
.rename(columns={column: f"{metric_name}_mean_rank"})
.reset_index())
return ranked
def t_test(self, metric_name=None):
"""
Runs t-test on all possible combinations between the estimators.
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
metrics_per_estimator_dataset = \
self._get_metrics_per_estimator_dataset(
metric_name)
t_df = pd.DataFrame()
perms = itertools.product(metrics_per_estimator_dataset.keys(),
repeat=2)
values = np.array([])
for perm in perms:
x = np.array(metrics_per_estimator_dataset[perm[0]])
y = np.array(metrics_per_estimator_dataset[perm[1]])
t_stat, p_val = ttest_ind(x, y)
t_test = {
"estimator_1": perm[0],
"estimator_2": perm[1],
"t_stat": t_stat,
"p_val": p_val
}
t_df = t_df.append(t_test, ignore_index=True)
values = np.append(values, t_stat)
values = np.append(values, p_val)
index = t_df["estimator_1"].unique()
values_names = ["t_stat", "p_val"]
col_idx = pd.MultiIndex.from_product([index, values_names])
values_reshaped = values.reshape(len(index),
len(values_names) * len(index))
values_df_multiindex = pd.DataFrame(values_reshaped, index=index,
columns=col_idx)
return t_df, values_df_multiindex
def sign_test(self, metric_name=None):
"""
        Non-parametric test for consistent differences between pairs of
        observations.
See `<https://en.wikipedia.org/wiki/Sign_test>`_ for details about
the test and
`<https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy
.stats.binom_test.html>`_
for details about the scipy implementation.
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
metrics_per_estimator_dataset = \
self._get_metrics_per_estimator_dataset(
metric_name)
sign_df = pd.DataFrame()
perms = itertools.product(metrics_per_estimator_dataset.keys(),
repeat=2)
for perm in perms:
x = np.array(metrics_per_estimator_dataset[perm[0]])
y = np.array(metrics_per_estimator_dataset[perm[1]])
signs = np.sum([i[0] > i[1] for i in zip(x, y)])
n = len(x)
p_val = stats.binom_test(signs, n)
sign_test = {
"estimator_1": perm[0],
"estimator_2": perm[1],
"p_val": p_val
}
sign_df = sign_df.append(sign_test, ignore_index=True)
sign_df_pivot = sign_df.pivot(index="estimator_1",
columns="estimator_2",
values="p_val")
return sign_df, sign_df_pivot
def ranksum_test(self, metric_name=None):
"""
        Non-parametric test for consistent differences between pairs of
        observations: the Wilcoxon rank-sum test checks whether values in
        one sample tend to be larger than in the other, i.e. whether both
        samples are drawn from the same distribution
`<http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test>`_.
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
metrics_per_estimator_dataset = \
self._get_metrics_per_estimator_dataset(
metric_name)
ranksum_df = pd.DataFrame()
perms = itertools.product(metrics_per_estimator_dataset.keys(),
repeat=2)
values = np.array([])
for perm in perms:
x = metrics_per_estimator_dataset[perm[0]]
y = metrics_per_estimator_dataset[perm[1]]
t_stat, p_val = ranksums(x, y)
ranksum = {
"estimator_1": perm[0],
"estimator_2": perm[1],
"t_stat": t_stat,
"p_val": p_val
}
ranksum_df = ranksum_df.append(ranksum, ignore_index=True)
values = np.append(values, t_stat)
values = np.append(values, p_val)
index = ranksum_df["estimator_1"].unique()
values_names = ["t_stat", "p_val"]
col_idx = pd.MultiIndex.from_product([index, values_names])
values_reshaped = values.reshape(len(index),
len(values_names) * len(index))
values_df_multiindex = pd.DataFrame(values_reshaped, index=index,
columns=col_idx)
return ranksum_df, values_df_multiindex
def t_test_with_bonferroni_correction(self, metric_name=None, alpha=0.05):
"""
        Bonferroni correction used to counteract the problem of multiple
        comparisons
https://en.wikipedia.org/wiki/Bonferroni_correction
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
df_t_test, _ = self.t_test(metric_name=metric_name)
idx_estim_1 = df_t_test["estimator_1"].unique()
idx_estim_2 = df_t_test["estimator_2"].unique()
estim_1 = len(idx_estim_1)
estim_2 = len(idx_estim_2)
critical_value = alpha / (estim_1 * estim_2)
bonfer_test = df_t_test["p_val"] <= critical_value
bonfer_test_reshaped = bonfer_test.values.reshape(estim_1, estim_2)
bonfer_df = pd.DataFrame(bonfer_test_reshaped, index=idx_estim_1,
columns=idx_estim_2)
return bonfer_df
    def wilcoxon_test(self, metric_name=None):
        """
`Wilcoxon signed-rank test
<https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test>`_.
Tests whether two related paired samples come from the same
distribution.
In particular, it tests whether the distribution of the differences
x-y is symmetric about zero
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
metrics_per_estimator_dataset = \
self._get_metrics_per_estimator_dataset(
metric_name)
wilcoxon_df = pd.DataFrame()
values = np.array([])
prod = itertools.product(metrics_per_estimator_dataset.keys(),
repeat=2)
for p in prod:
estim_1 = p[0]
estim_2 = p[1]
w, p_val = stats.wilcoxon(metrics_per_estimator_dataset[p[0]],
metrics_per_estimator_dataset[p[1]])
w_test = {
"estimator_1": estim_1,
"estimator_2": estim_2,
"statistic": w,
"p_val": p_val
}
wilcoxon_df = wilcoxon_df.append(w_test, ignore_index=True)
values = np.append(values, w)
values = np.append(values, p_val)
index = wilcoxon_df["estimator_1"].unique()
values_names = ["statistic", "p_val"]
col_idx = pd.MultiIndex.from_product([index, values_names])
values_reshaped = values.reshape(len(index),
len(values_names) * len(index))
values_df_multiindex = pd.DataFrame(values_reshaped, index=index,
columns=col_idx)
return wilcoxon_df, values_df_multiindex
def friedman_test(self, metric_name=None):
"""
The Friedman test is a non-parametric statistical test used to
detect differences
in treatments across multiple test attempts. The procedure involves
ranking each row (or block) together,
then considering the values of ranks by columns.
Implementation used:
`scipy.stats <https://docs.scipy.org/doc/scipy-0.15.1/reference
/generated/scipy.stats.friedmanchisquare.html>`_.
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
metrics_per_estimator_dataset = \
self._get_metrics_per_estimator_dataset(
metric_name)
friedman_test = stats.friedmanchisquare(
*[metrics_per_estimator_dataset[k] for k in
metrics_per_estimator_dataset.keys()])
values = [friedman_test[0], friedman_test[1]]
values_df = pd.DataFrame([values], columns=["statistic", "p_value"])
return friedman_test, values_df
def nemenyi(self, metric_name=None):
"""
Post-hoc test run if the `friedman_test` reveals statistical
significance.
For more information see `Nemenyi test
<https://en.wikipedia.org/wiki/Nemenyi_test>`_.
Implementation used `scikit-posthocs
<https://github.com/maximtrp/scikit-posthocs>`_.
"""
# lazy import to avoid hard dependency
from scikit_posthocs import posthoc_nemenyi
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
metrics_per_estimator_dataset = \
self._get_metrics_per_estimator_dataset(
metric_name)
strategy_dict = pd.DataFrame(metrics_per_estimator_dataset)
strategy_dict = strategy_dict.melt(var_name="groups",
value_name="values")
nemenyi = posthoc_nemenyi(strategy_dict, val_col="values",
group_col="groups")
return nemenyi
def plot_critical_difference_diagram(self, metric_name=None, alpha=0.1):
"""Plot critical difference diagrams
References:
-----------
original implementation by Aaron Bostrom, modified by Markus Löning
"""
self._check_is_evaluated()
metric_name = self._validate_metric_name(metric_name)
column = self._get_column_name(metric_name, suffix="mean")
data = (self.metrics_by_strategy_dataset
.copy()
.loc[:, ["dataset", "strategy", column]]
.pivot(index="strategy", columns="dataset", values=column)
.values
)
n_strategies, n_datasets = data.shape # [N,k] = size(s); correct
labels = self.results.strategy_names
r = np.argsort(data, axis=0)
S = np.sort(data, axis=0)
idx = n_strategies * np.tile(np.arange(n_datasets),
(n_strategies, 1)).T + r.T
R = np.asfarray(np.tile(np.arange(n_strategies) + 1, (n_datasets, 1)))
S = S.T
for i in range(n_datasets):
for j in range(n_strategies):
index = S[i, j] == S[i, :]
R[i, index] = np.mean(R[i, index], dtype=np.float64)
r = np.asfarray(r)
r.T.flat[idx] = R
r = r.T
if alpha == 0.01:
qalpha = [0.000, 2.576, 2.913, 3.113, 3.255, 3.364, 3.452, 3.526,
3.590, 3.646, 3.696, 3.741, 3.781, 3.818,
3.853, 3.884, 3.914, 3.941, 3.967, 3.992, 4.015, 4.037,
4.057, 4.077, 4.096, 4.114, 4.132, 4.148,
4.164, 4.179, 4.194, 4.208, 4.222, 4.236, 4.249, 4.261,
4.273, 4.285, 4.296, 4.307, 4.318, 4.329,
4.339, 4.349, 4.359, 4.368, 4.378, 4.387, 4.395, 4.404,
4.412, 4.420, 4.428, 4.435, 4.442, 4.449,
4.456]
elif alpha == 0.05:
qalpha = [0.000, 1.960, 2.344, 2.569, 2.728, 2.850, 2.948, 3.031,
3.102, 3.164, 3.219, 3.268, 3.313, 3.354,
3.391, 3.426, 3.458, 3.489, 3.517, 3.544, 3.569, 3.593,
3.616, 3.637, 3.658, 3.678, 3.696, 3.714,
3.732, 3.749, 3.765, 3.780, 3.795, 3.810, 3.824, 3.837,
3.850, 3.863, 3.876, 3.888, 3.899, 3.911,
3.922, 3.933, 3.943, 3.954, 3.964, 3.973, 3.983, 3.992,
4.001, 4.009, 4.017, 4.025, 4.032, 4.040,
4.046]
elif alpha == 0.1:
qalpha = [0.000, 1.645, 2.052, 2.291, 2.460, 2.589, 2.693, 2.780,
2.855, 2.920, 2.978, 3.030, 3.077, 3.120,
3.159, 3.196, 3.230, 3.261, 3.291, 3.319, 3.346, 3.371,
3.394, 3.417, 3.439, 3.459, 3.479, 3.498,
3.516, 3.533, 3.550, 3.567, 3.582, 3.597, 3.612, 3.626,
3.640, 3.653, 3.666, 3.679, 3.691, 3.703,
3.714, 3.726, 3.737, 3.747, 3.758, 3.768, 3.778, 3.788,
3.797, 3.806, 3.814, 3.823, 3.831, 3.838,
3.846]
else:
raise Exception("alpha must be 0.01, 0.05 or 0.1")
cd = qalpha[n_strategies - 1] * np.sqrt(
n_strategies * (n_strategies + 1) / (6 * n_datasets))
# set up plot
fig, ax = plt.subplots(1)
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(0, 140)
ax.set_axis_off()
tics = np.tile(np.array(np.arange(n_strategies)) / (n_strategies - 1),
(3, 1))
plt.plot(tics.flatten("F"),
np.tile([100, 105, 100], (1, n_strategies)).flatten(),
linewidth=2, color="black")
tics = np.tile(
(np.array(range(0, n_strategies - 1)) / (
n_strategies - 1)) + 0.5 / (n_strategies - 1), (3, 1))
plt.plot(tics.flatten("F"),
np.tile([100, 102.5, 100], (1, n_strategies - 1)).flatten(),
linewidth=1, color="black")
plt.plot([0, 0, 0, cd / (n_strategies - 1), cd / (n_strategies - 1),
cd / (n_strategies - 1)],
[127, 123, 125, 125, 123, 127], linewidth=1,
color="black")
plt.text(0.5 * cd / (n_strategies - 1), 130, "CD", fontsize=12,
horizontalalignment="center")
for i in range(n_strategies):
plt.text(i / (n_strategies - 1), 110, str(n_strategies - i),
fontsize=12, horizontalalignment="center")
# compute average ranks
r = np.mean(r, axis=0)
idx = np.argsort(r, axis=0)
r = np.sort(r, axis=0)
# compute statistically similar cliques
clique = np.tile(r, (n_strategies, 1)) - np.tile(np.vstack(r.T),
(1, n_strategies))
clique[clique < 0] = np.inf
clique = clique < cd
for i in range(n_strategies - 1, 0, -1):
if np.all(clique[i - 1, clique[i, :]] == clique[i, clique[i, :]]):
clique[i, :] = 0
n = np.sum(clique, 1)
clique = clique[n > 1, :]
n = np.size(clique, 0)
for i in range(np.int(np.ceil(n_strategies / 2))):
plt.plot([(n_strategies - r[i]) / (n_strategies - 1),
(n_strategies - r[i]) / (n_strategies - 1), 1.2],
[100, 100 - 5 * (n + 1) - 10 * (i + 1),
100 - 5 * (n + 1) - 10 * (i + 1)], color="black")
plt.text(1.2, 100 - 5 * (n + 1) - 10 * (i + 1) + 2, "%.2f" % r[i],
fontsize=10, horizontalalignment="right")
plt.text(1.25, 100 - 5 * (n + 1) - 10 * (i + 1), labels[idx[i]],
fontsize=12, verticalalignment="center",
horizontalalignment="left")
# labels displayed on the left
for i in range(np.int(np.ceil(n_strategies / 2)), n_strategies):
plt.plot([(n_strategies - r[i]) / (n_strategies - 1),
(n_strategies - r[i]) / (n_strategies - 1), -0.2],
[100, 100 - 5 * (n + 1) - 10 * (n_strategies - i),
100 - 5 * (n + 1) - 10 * (n_strategies - i)],
color="black")
plt.text(-0.2, 100 - 5 * (n + 1) - 10 * (n_strategies - i) + 2,
"%.2f" % r[i], fontsize=10,
horizontalalignment="left")
plt.text(-0.25, 100 - 5 * (n + 1) - 10 * (n_strategies - i),
labels[idx[i]], fontsize=12,
verticalalignment="center", horizontalalignment="right")
# group cliques of statistically similar classifiers
for i in range(np.size(clique, 0)):
R = r[clique[i, :]]
plt.plot([
((n_strategies - np.min(R)) / (n_strategies - 1)) + 0.015,
((n_strategies - np.max(R)) / (n_strategies - 1)) - 0.015
], [100 - 5 * (i + 1), 100 - 5 * (i + 1)], linewidth=6,
color="black")
plt.show()
return fig, ax
def _get_column_name(self, metric_name, suffix="mean"):
"""Helper function to get column name in computed metrics dataframe"""
return f"{metric_name}_{suffix}"
def _check_is_evaluated(self):
"""Check if evaluator has evaluated any metrics"""
if len(self._metric_names) == 0:
raise NotEvaluatedError(
"This evaluator has not evaluated any metric yet. Please call "
"'evaluate' with the appropriate arguments before using this "
"method.")
def _validate_metric_name(self, metric_name):
"""Check if metric has already been evaluated"""
if metric_name is None:
metric_name = self._metric_names[
-1] # if None, use the last evaluated metric
if metric_name not in self._metric_names:
raise ValueError(
f"{metric_name} has not been evaluated yet. Please call "
f"'evaluate' with the appropriate arguments first")
return metric_name
def _get_metrics_per_estimator_dataset(self, metric_name):
"""Helper function to get old format back, to be deprecated"""
# TODO deprecate in favor of new pandas data frame based data
# representation
column = f"{metric_name}_mean"
df = self.metrics_by_strategy_dataset.loc[:, ["strategy", "dataset",
column]].set_index(
"strategy")
d = {}
for strategy in df.index:
val = df.loc[strategy, column].tolist()
val = [val] if not isinstance(val, list) else val
d[strategy] = val
return d
def _get_metrics_per_estimator(self, metric_name):
"""Helper function to get old format back, to be deprecated"""
# TODO deprecate in favor of new pandas data frame based data
# representation
columns = ["strategy", "dataset", f"{metric_name}_mean",
f"{metric_name}_stderr"]
df = self.metrics_by_strategy_dataset.loc[:, columns]
d = {}
for dataset in df.dataset.unique():
results = []
for strategy in df.strategy.unique():
row = df.loc[
(df.strategy == strategy) & (df.dataset == dataset), :]
                m = row[f"{metric_name}_mean"].values[0]
                s = row[f"{metric_name}_stderr"].values[0]
results.append([strategy, m, s])
d[dataset] = results
return d
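# Illustrative usage sketch (hypothetical). It shows how the Evaluator above
# is typically driven once a benchmarking run has produced results. `results`
# must be a BaseResults instance (e.g. written by sktime's Orchestrator), and
# PairwiseMetric wrapping sklearn's accuracy_score is the metric wrapper
# assumed here -- substitute whatever metric you actually evaluated with.
def _example_evaluator_workflow(results):
    from sklearn.metrics import accuracy_score
    from sktime.benchmarking.metrics import PairwiseMetric
    evaluator = Evaluator(results)
    metric = PairwiseMetric(func=accuracy_score, name="accuracy")
    evaluator.evaluate(metric=metric)        # fills the metrics dataframes
    ranks = evaluator.rank()                 # average rank per strategy
    t_df, t_pivot = evaluator.t_test()       # pairwise t-tests between strategies
    fig, ax = evaluator.plot_boxplots()      # boxplot of the evaluated metric
    return ranks, t_df, fig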
| 40.536 | 79 | 0.552122 |
4a1aff91e7fac7cab3d7a78ff13185b5b0136043
| 920 |
py
|
Python
|
docs/conf.py
|
sandialabs/ufjc
|
cfaaf3ccc5af7eefb2c3f8ffc0ee09523f24bd4a
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
sandialabs/ufjc
|
cfaaf3ccc5af7eefb2c3f8ffc0ee09523f24bd4a
|
[
"BSD-3-Clause"
] | 1 |
2022-03-22T16:22:05.000Z
|
2022-03-22T16:22:05.000Z
|
docs/conf.py
|
sandialabs/ufjc
|
cfaaf3ccc5af7eefb2c3f8ffc0ee09523f24bd4a
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
project = 'uFJC'
author = 'Michael R. Buche, Scott J. Grutzik'
copyright = '2022 National Technology & Engineering Solutions of Sandia, \
LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, \
the U.S. Government retains certain rights in this software'
templates_path = ['_templates']
html_static_path = ['_static']
html_theme = 'sphinx_rtd_theme'
html_theme_options = {'navigation_depth': 8}
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive',
'sphinxcontrib.bibtex'
]
latex_engine = 'xelatex'
bibtex_bibfiles = ['main.bib']
bibtex_default_style = 'plain'
plot_html_show_formats = False
plot_html_show_source_link = False
plot_include_source = True
add_module_names = False
plot_rcparams = {'font.size': 10}
plot_formats = [('png', 300)]
| 29.677419 | 74 | 0.733696 |
4a1affea23e5fcddb4bb64d99df74bac6ccf77e9
| 3,174 |
py
|
Python
|
sandbox/apps/python/multigrid/jacobi3D/builder.py
|
bollu/polymage
|
517657142cc3ae74e9daff3b41a0257d6a4ce2b6
|
[
"Apache-2.0"
] | 10 |
2016-07-22T06:53:11.000Z
|
2021-02-19T06:22:00.000Z
|
sandbox/apps/python/multigrid/jacobi3D/builder.py
|
bollu/polymage
|
517657142cc3ae74e9daff3b41a0257d6a4ce2b6
|
[
"Apache-2.0"
] | null | null | null |
sandbox/apps/python/multigrid/jacobi3D/builder.py
|
bollu/polymage
|
517657142cc3ae74e9daff3b41a0257d6a4ce2b6
|
[
"Apache-2.0"
] | 2 |
2017-11-21T20:29:36.000Z
|
2021-05-21T01:52:05.000Z
|
from __init__ import *
import sys
import subprocess
sys.path.insert(0, ROOT+'apps/python/')
from cpp_compiler import c_compile
from loader import load_lib
from polymage_vcycle import v_cycle
from polymage_wcycle import w_cycle
from compiler import *
from constructs import *
def code_gen(pipe, file_name, app_data):
print("")
print("[builder]: writing the code to", file_name, "...")
code = pipe.generate_code(is_extern_c_func=True,
are_io_void_ptrs=True)
f = open(file_name, 'w')
f.write(code.__str__())
f.close()
return
def generate_graph(pipe, file_name, app_data):
graph_file = file_name+".dot"
png_graph = file_name+".png"
print("")
print("[builder]: writing the graph dot file to", graph_file, "...")
graph = pipe.pipeline_graph
graph.write(graph_file)
print("[builder]: ... DONE")
dotty_str = "dot -Tpng "+graph_file+" -o "+png_graph
print("")
print("[builder]: drawing the graph using dotty to", png_graph)
print(">", dotty_str)
subprocess.check_output(dotty_str, shell=True)
print("[builder]: ... DONE")
return
def build_mg_cycle(app_data):
pipe_data = app_data['pipe_data']
cycle_type = app_data['cycle']
if cycle_type == 'V':
# construct the multigrid v-cycle pipeline
mg = v_cycle(app_data)
elif cycle_type == 'W':
# construct the multigrid w-cycle pipeline
mg = w_cycle(app_data)
n = pipe_data['n']
live_outs = [mg]
pipe_name = app_data['cycle_name']
p_estimates = [(n, app_data['n'])]
p_constraints = [ Condition(n, "==", app_data['n']) ]
t_size = [8, 8, 32]
g_size = 6
opts = []
if app_data['early_free']:
opts += ['early_free']
if app_data['optimize_storage']:
opts += ['optimize_storage']
if app_data['pool_alloc']:
opts += ['pool_alloc']
if app_data['multipar']:
opts += ['multipar']
mg_pipe = buildPipeline(live_outs,
param_estimates=p_estimates,
param_constraints=p_constraints,
tile_sizes = t_size,
group_size = g_size,
options = opts,
pipe_name = pipe_name)
return mg_pipe
def create_lib(build_func, pipe_name, app_data):
mode = app_data['mode']
app_args = app_data['app_args']
pipe_src = pipe_name+".cpp"
pipe_so = pipe_name+".so"
graph_gen = app_data['graph_gen']
if build_func != None:
if mode == 'new':
# build the polymage pipeline
pipe = build_func(app_data)
# draw the pipeline graph to a png file
if graph_gen:
generate_graph(pipe, pipe_name, app_data)
# generate pipeline cpp source
code_gen(pipe, pipe_src, app_data)
#fi
#fi
if mode != 'ready':
# compile the cpp code
c_compile(pipe_src, pipe_so, app_data)
#fi
# load the shared library
lib_func_name = "pipeline_"+pipe_name
load_lib(pipe_so, lib_func_name, app_data)
return
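# Illustrative driver sketch (hypothetical values). In the jacobi3D app these
# settings normally come from the command-line parsing/init code; this only
# shows the keys that build_mg_cycle()/create_lib() read. Keys such as 'n',
# 'pipe_data' and 'app_args' are assumed to have been filled in already.
def _example_build(app_data):
    app_data.update({
        'cycle': 'V',               # 'V' or 'W' multigrid cycle
        'cycle_name': 'jacobi3d_v', # used for the .cpp/.so/graph file names
        'mode': 'new',              # 'new' generates + compiles, 'ready' only loads
        'graph_gen': True,          # also dump the pipeline graph (.dot/.png)
        'early_free': True,
        'optimize_storage': True,
        'pool_alloc': False,
        'multipar': False,
    })
    create_lib(build_mg_cycle, app_data['cycle_name'], app_data)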
| 26.45 | 72 | 0.597984 |
4a1b001d2b36301f84ff069cca47108aaca79809
| 10,274 |
py
|
Python
|
build/plugins/gobuild.py
|
notimesea/catboost
|
1d3e0744f1d6c6d74d724878dc9fe92076c8b1ce
|
[
"Apache-2.0"
] | 2 |
2019-08-11T22:21:41.000Z
|
2019-08-28T23:40:44.000Z
|
build/plugins/gobuild.py
|
TakeOver/stochasticrank
|
45f9c701785bb952c59c704a2bfe878d5cbb01e4
|
[
"Apache-2.0"
] | null | null | null |
build/plugins/gobuild.py
|
TakeOver/stochasticrank
|
45f9c701785bb952c59c704a2bfe878d5cbb01e4
|
[
"Apache-2.0"
] | 1 |
2020-10-17T09:28:08.000Z
|
2020-10-17T09:28:08.000Z
|
import base64
import itertools
import md5
import os
from _common import rootrel_arc_src, tobuilddir
import ymake
runtime_cgo_path = os.path.join('runtime', 'cgo')
runtime_msan_path = os.path.join('runtime', 'msan')
runtime_race_path = os.path.join('runtime', 'race')
arc_project_prefix = 'a.yandex-team.ru/'
import_runtime_cgo_false = {
'norace': (runtime_cgo_path, runtime_msan_path, runtime_race_path),
'race': (runtime_cgo_path, runtime_msan_path),
}
import_syscall_false = {
    'norace': (runtime_cgo_path,),
'race': (runtime_cgo_path, runtime_race_path),
}
def get_appended_values(unit, key):
value = []
raw_value = unit.get(key)
if raw_value:
value = filter(lambda x: len(x) > 0, raw_value.split(' '))
assert len(value) == 0 or value[0] == '$' + key
return value[1:] if len(value) > 0 else value
def compare_versions(version1, version2):
v1 = tuple(str(int(x)).zfill(8) for x in version1.split('.'))
v2 = tuple(str(int(x)).zfill(8) for x in version2.split('.'))
if v1 == v2:
return 0
return 1 if v1 < v2 else -1
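# For example: compare_versions('1.12', '1.12') == 0,
# compare_versions('1.11', '1.12') == 1 (version1 is older) and
# compare_versions('1.13', '1.9') == -1 (version1 is newer); the zero padding
# above makes the component-wise string comparison behave numerically.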
def go_package_name(unit):
name = unit.get('GO_PACKAGE_VALUE')
if not name:
name = unit.get('GO_TEST_IMPORT_PATH')
if name:
name = os.path.basename(os.path.normpath(name))
elif unit.get('MODULE_TYPE') == 'PROGRAM':
name = 'main'
else:
name = unit.get('REALPRJNAME')
return name
def need_lint(path):
return not path.startswith('$S/vendor/') and not path.startswith('$S/contrib/')
def on_go_process_srcs(unit):
"""
_GO_PROCESS_SRCS() macro processes only 'CGO' files. All remaining *.go files
and other input files are currently processed by a link command of the
GO module (GO_LIBRARY, GO_PROGRAM)
"""
srcs_files = get_appended_values(unit, 'GO_SRCS_VALUE')
asm_files = []
c_files = []
cxx_files = []
ev_files = []
go_files = []
in_files = []
proto_files = []
s_files = []
syso_files = []
classifed_files = {
'.c': c_files,
'.cc': cxx_files,
'.cpp': cxx_files,
'.cxx': cxx_files,
'.ev': ev_files,
'.go': go_files,
'.in': in_files,
'.proto': proto_files,
'.s': asm_files,
'.syso': syso_files,
'.C': cxx_files,
'.S': s_files,
}
    # Classify files specified in _GO_SRCS() macro by extension and process CGO_EXPORT keyword
    # which can precede C/C++ files only
is_cgo_export = False
for f in srcs_files:
_, ext = os.path.splitext(f)
ext_files = classifed_files.get(ext)
if ext_files is not None:
if is_cgo_export:
is_cgo_export = False
if ext in ('.c', '.cc', '.cpp', '.cxx', '.C'):
unit.oncopy_file_with_context([f, f, 'OUTPUT_INCLUDES', '${BINDIR}/_cgo_export.h'])
f = '${BINDIR}/' + f
else:
ymake.report_configure_error('Unmatched CGO_EXPORT keyword in SRCS()/_GO_SRCS() macro')
ext_files.append(f)
elif f == 'CGO_EXPORT':
is_cgo_export = True
else:
            # FIXME(snermolaev): We can report unsupported files for _GO_SRCS here
pass
if is_cgo_export:
ymake.report_configure_error('Unmatched CGO_EXPORT keyword in SRCS()/_GO_SRCS() macro')
for f in go_files:
if f.endswith('_test.go'):
ymake.report_configure_error('file {} must be listed in GO_TEST_SRCS() or GO_XTEST_SRCS() macros'.format(f))
go_test_files = get_appended_values(unit, 'GO_TEST_SRCS_VALUE')
go_xtest_files = get_appended_values(unit, 'GO_XTEST_SRCS_VALUE')
for f in go_test_files + go_xtest_files:
if not f.endswith('_test.go'):
ymake.report_configure_error('file {} should not be listed in GO_TEST_SRCS() or GO_XTEST_SRCS() macros'.format(f))
is_test_module = unit.enabled('GO_TEST_MODULE')
# Add gofmt style checks
if unit.enabled('_GO_FMT_ADD_CHECK'):
resolved_go_files = []
go_source_files = [] if is_test_module and unit.get(['GO_TEST_FOR_DIR']) else go_files
for path in itertools.chain(go_source_files, go_test_files, go_xtest_files):
if path.endswith('.go'):
resolved = unit.resolve_arc_path([path])
if resolved != path and need_lint(resolved):
resolved_go_files.append(resolved)
if resolved_go_files:
basedirs = {}
for f in resolved_go_files:
basedir = os.path.dirname(f)
if basedir not in basedirs:
basedirs[basedir] = []
basedirs[basedir].append(f)
for basedir in basedirs:
unit.onadd_check(['gofmt'] + basedirs[basedir])
# Go coverage instrumentation (NOTE! go_files list is modified here)
if is_test_module and unit.enabled('GO_TEST_COVER'):
cover_info = []
for f in go_files:
if f.endswith('_test.go'):
continue
cover_var = 'GoCover' + base64.b32encode(f).rstrip('=')
cover_file = unit.resolve_arc_path(f)
unit.on_go_gen_cover_go([cover_file, cover_var])
if cover_file.startswith('$S/'):
cover_file = arc_project_prefix + cover_file[3:]
cover_info.append('{}:{}'.format(cover_var, cover_file))
# go_files should be empty now since the initial list shouldn't contain
# any non-go or go test file. The value of go_files list will be used later
# to update the value of GO_SRCS_VALUE
go_files = []
unit.set(['GO_COVER_INFO_VALUE', ' '.join(cover_info)])
# We have cleaned up the list of files from GO_SRCS_VALUE var and we have to update
# the value since it is used in module command line
unit.set(['GO_SRCS_VALUE', ' '.join(itertools.chain(go_files, asm_files, syso_files))])
unit_path = unit.path()
# Add go vet check
if unit.enabled('_GO_VET_ADD_CHECK') and need_lint(unit_path):
vet_report_file_name = os.path.join(unit_path, '{}{}'.format(unit.filename(), unit.get('GO_VET_REPORT_EXT')))
unit.onadd_check(["govet", '$(BUILD_ROOT)/' + tobuilddir(vet_report_file_name)[3:]])
for f in ev_files:
ev_proto_file = '{}.proto'.format(f)
unit.oncopy_file_with_context([f, ev_proto_file])
proto_files.append(ev_proto_file)
# Process .proto files
for f in proto_files:
unit.on_go_proto_cmd(f)
# Process .in files
for f in in_files:
unit.onsrc(f)
# Generate .symabis for .s files (starting from 1.12 version)
if compare_versions('1.12', unit.get('GOSTD_VERSION')) >= 0 and len(asm_files) > 0:
unit.on_go_compile_symabis(asm_files)
# Process cgo files
cgo_files = get_appended_values(unit, 'CGO_SRCS_VALUE')
cgo_cflags = []
if len(c_files) + len(cxx_files) + len(s_files) + len(cgo_files) > 0:
if is_test_module:
go_test_for_dir = unit.get('GO_TEST_FOR_DIR')
if go_test_for_dir and go_test_for_dir.startswith('$S/'):
cgo_cflags.append(os.path.join('-I${ARCADIA_ROOT}', go_test_for_dir[3:]))
cgo_cflags.append('-I$CURDIR')
unit.oncgo_cflags(cgo_cflags)
cgo_cflags = get_appended_values(unit, 'CGO_CFLAGS_VALUE')
for f in itertools.chain(c_files, cxx_files, s_files):
unit.onsrc([f] + cgo_cflags)
if len(cgo_files) > 0:
if not unit.enabled('CGO_ENABLED'):
ymake.report_configure_error('trying to build with CGO (CGO_SRCS is non-empty) when CGO is disabled')
import_path = rootrel_arc_src(unit_path, unit)
go_std_root = unit.get('GOSTD') + os.path.sep
if import_path.startswith(go_std_root):
import_path = import_path[len(go_std_root):]
if import_path != runtime_cgo_path:
unit.onpeerdir(os.path.join(go_std_root, runtime_cgo_path))
race_mode = 'race' if unit.enabled('RACE') else 'norace'
import_runtime_cgo = 'false' if import_path in import_runtime_cgo_false[race_mode] else 'true'
import_syscall = 'false' if import_path in import_syscall_false[race_mode] else 'true'
args = [import_path] + cgo_files + ['FLAGS', '-import_runtime_cgo=' + import_runtime_cgo, '-import_syscall=' + import_syscall]
unit.on_go_compile_cgo1(args)
cgo2_cflags = get_appended_values(unit, 'CGO2_CFLAGS_VALUE')
for f in cgo_files:
if f.endswith('.go'):
unit.onsrc([f[:-2] + 'cgo2.c'] + cgo_cflags + cgo2_cflags)
else:
ymake.report_configure_error('file {} should not be listed in CGO_SRCS() macros'.format(f))
args = [go_package_name(unit)] + cgo_files
if len(c_files) > 0:
args += ['C_FILES'] + c_files
if len(s_files) > 0:
args += ['S_FILES'] + s_files
if len(syso_files) > 0:
args += ['OBJ_FILES'] + syso_files
unit.on_go_compile_cgo2(args)
def on_go_resource(unit, *args):
args = list(args)
files = args[::2]
keys = args[1::2]
suffix_md5 = md5.new('@'.join(args)).hexdigest()
resource_go = os.path.join("resource.{}.res.go".format(suffix_md5))
unit.onpeerdir(["library/go/core/resource"])
if len(files) != len(keys):
ymake.report_configure_error("last file {} is missing resource key".format(files[-1]))
for i, (key, filename) in enumerate(zip(keys, files)):
if not key:
ymake.report_configure_error("file key must be non empty")
return
if filename == "-" and "=" not in key:
ymake.report_configure_error("key \"{}\" must contain = sign".format(key))
return
# quote key, to avoid automatic substitution of filename by absolute
# path in RUN_PROGRAM
args[2*i+1] = "notafile" + args[2*i+1]
files = [file for file in files if file != "-"]
unit.onrun_program([
"library/go/core/resource/cc",
"-package", go_package_name(unit),
"-o", resource_go] + list(args) + [
"IN"] + files + [
"OUT", resource_go])
| 38.051852 | 134 | 0.619233 |
4a1b007ac6101853ed6782014e9c1c301c76847a
| 1,965 |
py
|
Python
|
tesla_powerwall/error.py
|
bdraco/tesla_powerwall
|
dbf4493796c13e08fef2e8ddda547ad9ef1e2469
|
[
"Apache-2.0"
] | null | null | null |
tesla_powerwall/error.py
|
bdraco/tesla_powerwall
|
dbf4493796c13e08fef2e8ddda547ad9ef1e2469
|
[
"Apache-2.0"
] | null | null | null |
tesla_powerwall/error.py
|
bdraco/tesla_powerwall
|
dbf4493796c13e08fef2e8ddda547ad9ef1e2469
|
[
"Apache-2.0"
] | null | null | null |
class PowerwallError(Exception):
def __init__(self, msg):
super().__init__(msg)
class APIError(PowerwallError):
def __init__(self, error):
super().__init__("Powerwall api error: {}".format(error))
class MissingAttributeError(APIError):
def __init__(self, response: dict, attribute: str, url: str = None):
self.response = response
self.attribute = attribute
self.url = url
if url is None:
super().__init__(
"The attribute '{}' is expected in the response but is missing.".format(
attribute
)
)
else:
super().__init__(
"The attribute '{}' is expected in the response for '{}' but is missing.".format(
attribute, url
)
)
class PowerwallUnreachableError(PowerwallError):
def __init__(self, reason=None):
msg = "Powerwall is unreachable"
self.reason = reason
if reason is not None:
msg = "{}: {}".format(msg, reason)
super().__init__(msg)
class AccessDeniedError(PowerwallError):
def __init__(self, resource, error=None, message=None):
self.resource = resource
self.error = error
self.message = message
msg = "Access denied for resource {}".format(resource)
if error is not None:
if message is not None:
msg = "{}: {}: {}".format(msg, error, message)
else:
msg = "{}: {}".format(msg, error)
super().__init__(msg)
class MeterNotAvailableError(PowerwallError):
def __init__(self, meter, available_meters):
self.meter = meter
self.available_meters = available_meters
        super().__init__(
            "Meter {} is not available at your Powerwall. The following meters are available: {}".format(
meter.value, available_meters
)
)
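# Illustrative usage sketch (hypothetical). A call site showing how these
# exceptions are typically handled; `powerwall.get_meters()` stands in for
# whichever tesla_powerwall API call is being guarded and is only an assumed
# example here.
def _example_error_handling(powerwall):
    try:
        return powerwall.get_meters()
    except PowerwallUnreachableError as err:
        print("Powerwall offline: {}".format(err.reason))       # network problem
    except AccessDeniedError as err:
        print("Not authenticated for {}".format(err.resource))  # login required
    except APIError as err:
        print("Unexpected API response: {}".format(err))        # covers MissingAttributeError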
| 31.190476 | 102 | 0.567939 |
4a1b007b3fe733e486c2f26f0976a154b8f3c152
| 3,239 |
py
|
Python
|
example/pybbm_private_messages/settings.py
|
skolsuper/pybbm_private_messages
|
0c89b5b0e07074902c51f7c85c418295118e5933
|
[
"MIT"
] | 2 |
2015-08-30T08:44:05.000Z
|
2015-08-30T08:44:13.000Z
|
example/pybbm_private_messages/settings.py
|
skolsuper/pybbm_private_messages
|
0c89b5b0e07074902c51f7c85c418295118e5933
|
[
"MIT"
] | 1 |
2015-08-29T09:55:28.000Z
|
2015-08-29T09:55:28.000Z
|
example/pybbm_private_messages/settings.py
|
skolsuper/pybbm_private_messages
|
0c89b5b0e07074902c51f7c85c418295118e5933
|
[
"MIT"
] | null | null | null |
"""
Django settings for pybbm_private_messages project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'agg#lu3aj)8r8=2e@!7mn115p$$nf^ue1+@fv!083p$ze#le56'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_select2',
'easy_thumbnails',
'private_messages',
'pybb',
'registration',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pybbm_private_messages.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'base_templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'private_messages.context_processors.unread_messages',
'pybb.context_processors.processor',
],
},
},
]
WSGI_APPLICATION = 'pybbm_private_messages.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
DEBUG_TOOLBAR = {
'JQUERY_URL': 'admin/js/jquery.js'
}
SELECT2_BOOTSTRAP = True
# Pybbm
PYBB_AVATAR_WIDTH = 120
PYBB_AVATAR_HEIGHT = 120
| 25.706349 | 71 | 0.702377 |
4a1b00bb2c504076f63b9f948855ea613e074a5f
| 242 |
py
|
Python
|
allauth/socialaccount/providers/auth0_provider/urls.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/auth0_provider/urls.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/auth0_provider/urls.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from allauth.socialaccount.providers.auth0_provider.provider import Auth0Provider
from allauth.socialaccount.providers.oauth2_provider.urls import default_urlpatterns
urlpatterns = default_urlpatterns(Auth0Provider)
| 34.571429 | 84 | 0.842975 |
4a1b028ae17a1b31ba2ea25ec82b164e403adb1a
| 6,868 |
py
|
Python
|
src/tests/TestJustIntonation.py
|
ytyaru/Python.MusicTheory.Temperament.JustIntonation.201709221413
|
0c4e24a2b35ef985ed3697659525b02667958b34
|
[
"CC0-1.0"
] | null | null | null |
src/tests/TestJustIntonation.py
|
ytyaru/Python.MusicTheory.Temperament.JustIntonation.201709221413
|
0c4e24a2b35ef985ed3697659525b02667958b34
|
[
"CC0-1.0"
] | null | null | null |
src/tests/TestJustIntonation.py
|
ytyaru/Python.MusicTheory.Temperament.JustIntonation.201709221413
|
0c4e24a2b35ef985ed3697659525b02667958b34
|
[
"CC0-1.0"
] | null | null | null |
#!python3.6
import unittest
import math
from MusicTheory.temperament.JustIntonation import JustIntonation
from MusicTheory.temperament.FundamentalTone import FundamentalTone
from MusicTheory.scale.Scale import Scale
from MusicTheory.scale.ScaleIntervals import ScaleIntervals
from MusicTheory.pitch.PitchClass import PitchClass
from MusicTheory.pitch.OctaveClass import OctaveClass
from MusicTheory.pitch.NoteNumber import NoteNumber
import Framework.ConstMeta
"""
Tests for JustIntonation.
"""
class TestJustIntonation(unittest.TestCase):
def test_init_Default(self):
j = JustIntonation()
self.assertTrue(isinstance(j.FundamentalTone, FundamentalTone))
self.assertEqual(440, j.FundamentalTone.Hz)
self.assertEqual(9, j.FundamentalTone.PitchClass)
self.assertEqual(5, j.FundamentalTone.OctaveClass)
self.assertTrue(isinstance(j.Scale, Scale))
self.assertTrue(j.Scale.Key == 0)
self.assertTrue(j.Scale.Intervals == ScaleIntervals.Major)
def test_init_None(self):
j = JustIntonation(None,None)
self.assertTrue(isinstance(j.FundamentalTone, FundamentalTone))
self.assertEqual(440, j.FundamentalTone.Hz)
self.assertEqual(9, j.FundamentalTone.PitchClass)
self.assertEqual(5, j.FundamentalTone.OctaveClass)
self.assertTrue(isinstance(j.Scale, Scale))
self.assertTrue(j.Scale.Key == 0)
self.assertTrue(j.Scale.Intervals == ScaleIntervals.Major)
def test_init_set(self):
f = FundamentalTone(hz=432, pitchClass=9, octaveClass=5)
s = Scale(0, ScaleIntervals.Minor)
j = JustIntonation(f, s)
self.assertTrue(isinstance(j.FundamentalTone, FundamentalTone))
self.assertEqual(432, j.FundamentalTone.Hz)
self.assertEqual(9, j.FundamentalTone.PitchClass)
self.assertEqual(5, j.FundamentalTone.OctaveClass)
self.assertTrue(isinstance(j.Scale, Scale))
self.assertTrue(j.Scale.Key == 0)
self.assertTrue(j.Scale.Intervals == ScaleIntervals.Minor)
del f
with self.assertRaises(ReferenceError) as ex:
print(j.FundamentalTone)
self.assertIn('weakly-referenced object no longer exists', str(ex.exception))
del s
with self.assertRaises(ReferenceError) as ex:
print(j.Scale)
self.assertIn('weakly-referenced object no longer exists', str(ex.exception))
def test_init_Invalid_Type_FundamentalTone(self):
with self.assertRaises(TypeError) as ex:
j = JustIntonation('')
self.assertIn('引数fundamentalToneはFundamentalTone型にしてください。', str(ex.exception))
def test_init_Scale_Type_FundamentalTone(self):
with self.assertRaises(TypeError) as ex:
j = JustIntonation(None, '')
self.assertIn('引数scaleはScale型にしてください。', str(ex.exception))
def test_Get_A4(self):
print('基音=A4(440Hz) 調=A4')
f = FundamentalTone(hz=440, pitchClass=9, octaveClass=5)
s = Scale(9, ScaleIntervals.Major)
j = JustIntonation(f, s)
for p in range(PitchClass.Max+1):
print(j.GetFrequency(p, 5))
        # No reference values are available, so the produced figures are treated as correct.
def test_Get_C4(self):
print('基音=A4(440Hz) 調=C4')
f = FundamentalTone(hz=440, pitchClass=9, octaveClass=5)
s = Scale(0, ScaleIntervals.Major)
j = JustIntonation(f, s)
for p in range(PitchClass.Max+1):
print(j.GetFrequency(p, 5))
        # No reference values are available, so the produced figures are treated as correct.
print('純正律の周波数において、正解の基準が見つけられなかった。出た数値が正解ということにする。')
"""
def test_Get(self):
print('test_Get')
e = EqualTemperament()
expecteds = [261,277,293,311,329,349,369,391,415,440,466,493]
for p in range(PitchClass.Max+1):
print(e.GetFrequency(p, 5))
self.assertEqual(expecteds[p], math.floor(e.GetFrequency(p, 5)))
def test_Get_MinOctave(self):
print('test_Get_MinOctave')
e = EqualTemperament()
expecteds = [261,277,293,311,329,349,369,391,415,440,466,493]
for p in range(PitchClass.Max+1):
print(e.GetFrequency(p, 0))
self.assertEqual(math.floor(expecteds[p]/math.pow(2,5)), math.floor(e.GetFrequency(p, 0)))
def test_Get_MaxOctave(self):
print('test_Get_MaxOctave')
e = EqualTemperament()
expecteds = [8372,8869,9397,9956,10548,11175,11839,12543]
for p in range(PitchClass.Max+1):
if p + (10 * (PitchClass.Max+1)) < 128:
print(e.GetFrequency(p, 10))
self.assertEqual(expecteds[p], math.floor(e.GetFrequency(p, 10)))
def test_Get_Low(self):
print('test_Get_Low')
e = EqualTemperament()
expecteds = [261,277,293,311,329,349,369,391,415,440,466,493]
for p in range(PitchClass.Max+1):
print(e.GetFrequency(p, 5-1))
self.assertEqual(math.floor(expecteds[p]/2), math.floor(e.GetFrequency(p, 5-1)))
def test_Get_Hi(self):
print('test_Get_Low')
e = EqualTemperament()
expecteds = [261,277,293,311,329,349,369,391,415,440,466,493]
for p in range(PitchClass.Max+1):
print(e.GetFrequency(p, 5+1))
self.assertIn(math.floor(e.GetFrequency(p, 5+1)), [math.floor(expecteds[p]*2), math.floor(expecteds[p]*2)+1])
def test_Get_Invalid_Type_PitchClass(self):
e = EqualTemperament()
with self.assertRaises(TypeError) as ex:
e.GetFrequency('pitch', 5)
self.assertIn('引数pitchClassはint型にしてください。', str(ex.exception))
def test_Get_Invalid_Type_OctaveClass(self):
e = EqualTemperament()
with self.assertRaises(TypeError) as ex:
e.GetFrequency(9, 'octave')
self.assertIn('引数octaveはint型にしてください。', str(ex.exception))
def test_Get_OutOfRange_Pitch_Min(self):
e = EqualTemperament()
with self.assertRaises(ValueError) as ex:
e.GetFrequency(-1, 5)
self.assertIn(f'引数pitchClassは{PitchClass.Min}〜{PitchClass.Max}までの整数値にしてください。', str(ex.exception))
def test_Get_OutOfRange_Pitch_Max(self):
e = EqualTemperament()
with self.assertRaises(ValueError) as ex:
e.GetFrequency(12, 5)
self.assertIn(f'引数pitchClassは{PitchClass.Min}〜{PitchClass.Max}までの整数値にしてください。', str(ex.exception))
def test_Get_OutOfRange_Octave_Min(self):
e = EqualTemperament()
with self.assertRaises(ValueError) as ex:
e.GetFrequency(9, -1)
self.assertIn('引数octaveは0〜10の値にしてください。', str(ex.exception))
def test_Get_OutOfRange_Octave_Max(self):
e = EqualTemperament()
with self.assertRaises(ValueError) as ex:
e.GetFrequency(9, 11)
self.assertIn('引数octaveは0〜10の値にしてください。', str(ex.exception))
"""
if __name__ == '__main__':
unittest.main()
| 44.888889 | 121 | 0.660891 |
4a1b02a3d2b451a8c03590500003f7dfc339fa5b
| 3,332 |
py
|
Python
|
examples/ex_cnn_cascade_training_face_detection/preprocess_negative.py
|
KayaDevSolutions/deepgaze
|
a6d444c70bb75ffcfc23d3b31a0567711fb956a7
|
[
"MIT"
] | 1,653 |
2016-05-06T02:56:08.000Z
|
2022-03-26T16:34:30.000Z
|
examples/ex_cnn_cascade_training_face_detection/preprocess_negative.py
|
KayaDevSolutions/deepgaze
|
a6d444c70bb75ffcfc23d3b31a0567711fb956a7
|
[
"MIT"
] | 94 |
2016-04-09T04:40:08.000Z
|
2022-03-31T08:52:17.000Z
|
examples/ex_cnn_cascade_training_face_detection/preprocess_negative.py
|
KayaDevSolutions/deepgaze
|
a6d444c70bb75ffcfc23d3b31a0567711fb956a7
|
[
"MIT"
] | 514 |
2016-08-28T01:47:56.000Z
|
2022-01-30T13:42:39.000Z
|
#!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io
# https://mpatacchiola.github.io/blog/
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
from six.moves import cPickle as pickle
import cv2
import os
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--img_size', required=True,
help='The size of the images: 12, 24, 48')
parser.add_argument('-i', '--input_directory', required=True,
help='The directory containing the images')
args = vars(parser.parse_args())
img_size = int(args['img_size']) # size to resize the image to
image_list = list()
dataset_path = args['input_directory'] # "./detection/pos_faces"
counter = 1
for root, dirs, files in os.walk(dataset_path):
for filename in files:
if filename.endswith(".jpg"):
image_path = os.path.join(root, filename)
image = cv2.imread(image_path)
image_dimension = image.shape[0]
if image_dimension >= img_size:
print("Image number ..... " + str(counter))
print("Image name ..... " + str(filename))
print("Image dimension ..... " + str(image_dimension))
print("")
image_rescaled = cv2.resize(image, (img_size,img_size), interpolation = cv2.INTER_AREA)
image_list.append(image_rescaled)
counter += 1
else:
print("Image rejected!")
print("Image name ..... " + str(filename))
print("Image dimension ..... " + str(image_dimension))
print("")
# Creating the dataset
    tot_images = len(image_list)  # counter starts at 1, so use the collected list length for the true count
training_label = np.zeros((tot_images, 2))
training_label[:,1] = 1
training_dataset = np.asarray(image_list)
# Store in pickle
pickle_file = "./negative_dataset_" + str(img_size) + "net_" + str(tot_images) + ".pickle"
print("Saving the dataset in: " + pickle_file)
print("... ")
try:
print("Opening the file...")
f = open(pickle_file, 'wb')
save = {'training_dataset': training_dataset,
'training_label': training_label}
print("Training dataset: ", training_dataset.shape)
print("Training label: ", training_label.shape)
print("Saving the file...")
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
print("Closing the file...")
f.close()
print("")
print("The dataset has been saved and it is ready for the training! \n")
print("")
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
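def load_negative_dataset(pickle_file):
    # A minimal read-back sketch, assuming the pickle was produced by main()
    # above; it is illustrative only, not part of the original preprocessing.
    with open(pickle_file, 'rb') as f:
        saved = pickle.load(f)
    # 'training_dataset' has shape (N, img_size, img_size, 3); 'training_label'
    # has shape (N, 2) with the second column set to 1 for the negative class.
    return saved['training_dataset'], saved['training_label']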
if __name__ == "__main__":
main()
| 39.666667 | 141 | 0.602041 |
4a1b03043b73c3778abaa3869d3682c6dbd77061
| 1,153 |
py
|
Python
|
parsers/pyBSRP/build/lib/bsrp/protocols/clearchanneljson.py
|
uky-transport-data-science/BSR_parsers
|
2d3a34b76481b94c74df8ab406340eb62aac28ab
|
[
"Apache-2.0"
] | 4 |
2019-12-27T13:33:33.000Z
|
2022-01-20T14:08:22.000Z
|
parsers/pyBSRP/build/lib/bsrp/protocols/clearchanneljson.py
|
uky-transport-data-science/BSR_parsers
|
2d3a34b76481b94c74df8ab406340eb62aac28ab
|
[
"Apache-2.0"
] | null | null | null |
parsers/pyBSRP/build/lib/bsrp/protocols/clearchanneljson.py
|
uky-transport-data-science/BSR_parsers
|
2d3a34b76481b94c74df8ab406340eb62aac28ab
|
[
"Apache-2.0"
] | 1 |
2021-03-16T16:20:40.000Z
|
2021-03-16T16:20:40.000Z
|
# Parser: bici (e.g., barcelona)
import json, re
def parse(df, data, utc):
# df is a dict with the following keys:
# [u'feedurl', u'feedname', u'bssid', u'format', u'feedurl2', u'keyreq', u'parsername', u'rid']
# parse out desired info
# does the file have valid content
try:
json_data = json.loads(data)
except ValueError:
print(utc + ' ' + df['bssid'] + " Parsing JSON failed for " + df['feedurl'])
return False
# capture clean results in clean_stations_list
# stnid, lat, lng, docks, bikes, spaces, name
clean_stations_list = []
for stn in json_data:
if stn['status'] == 'OPN':
active = 'yes'
else:
active = 'no'
clean_stations_list.append([stn['id'], stn['lat'], stn['lon'], str(int(stn['bikes']) + int(stn['slots'])), stn['bikes'], stn['slots'], stn['name'].encode('utf8'), active])
# check if we have some data
if len(clean_stations_list) == 0:
print(utc + ' ' + df['bssid'] + " Parser did not find any station's data.")
return False
return clean_stations_list
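def _example_usage():
	# A minimal invocation sketch; the feed dict and JSON payload below are
	# fabricated for illustration and do not come from a real feed.
	df = {'bssid': 'example', 'feedname': 'example', 'feedurl': 'http://example.invalid/feed.json',
	      'feedurl2': '', 'format': 'json', 'keyreq': 'no', 'parsername': 'clearchanneljson', 'rid': '0'}
	data = '[{"id": "1", "lat": "41.39", "lon": "2.17", "bikes": "5", "slots": "10", "status": "OPN", "name": "Example"}]'
	# returns [[stnid, lat, lng, docks, bikes, spaces, name, active], ...]
	return parse(df, data, '2020-01-01T00:00:00')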
| 32.942857 | 180 | 0.570685 |
4a1b037f7bfb6c7698b71f965e6759fb806ae1ee
| 17,221 |
py
|
Python
|
kivymd/list.py
|
pigogames/KivyMD
|
54767e9cd9552df19676af9d105700a06b2c8d71
|
[
"MIT"
] | null | null | null |
kivymd/list.py
|
pigogames/KivyMD
|
54767e9cd9552df19676af9d105700a06b2c8d71
|
[
"MIT"
] | null | null | null |
kivymd/list.py
|
pigogames/KivyMD
|
54767e9cd9552df19676af9d105700a06b2c8d71
|
[
"MIT"
] | 1 |
2021-06-08T12:44:30.000Z
|
2021-06-08T12:44:30.000Z
|
"""
Lists
=====
Copyright (c) 2015 Andrés Rodríguez and KivyMD contributors -
KivyMD library up to version 0.1.2
Copyright (c) 2019 Ivanov Yuri and KivyMD contributors -
KivyMD library version 0.1.3 and higher
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
`Material Design spec, Lists <https://material.io/design/components/lists.html>`_
The class :class:`MDList` in combination with a ListItem like
:class:`OneLineListItem` will create a list that expands as items are added to
it, working nicely with Kivy's :class:`~kivy.uix.scrollview.ScrollView`.
Example
-------
Kv Lang:
.. code-block:: python
ScrollView:
do_scroll_x: False # Important for MD compliance
MDList:
OneLineListItem:
text: "Single-line item"
TwoLineListItem:
text: "Two-line item"
secondary_text: "Secondary text here"
ThreeLineListItem:
text: "Three-line item"
secondary_text:
"This is a multi-line label where you can "\
"fit more text than usual"
Python:
.. code-block:: python
# Sets up ScrollView with MDList, as normally used in Android:
sv = ScrollView()
ml = MDList()
sv.add_widget(ml)
contacts = ["Paula", "John", "Kate", "Vlad"]
for c in contacts:
ml.add_widget(
OneLineListItem(
text=c
)
)
Advanced usage
--------------
Due to the variety in sizes and controls in the MD spec, this module suffers
from a certain level of complexity to keep the widgets compliant, flexible
and performant.
For this KivyMD provides ListItems that try to cover the most common usecases,
when those are insufficient, there's a base class called :class:`ListItem`
which you can use to create your own ListItems. This documentation will only
cover the provided ones, for custom implementations please refer to this
module's source code.
Text only ListItems
-------------------
- :class:`~OneLineListItem`
- :class:`~TwoLineListItem`
- :class:`~ThreeLineListItem`
These are the simplest ones. The :attr:`~ListItem.text` attribute changes the
text in the most prominent line, while :attr:`~ListItem.secondary_text`
changes the second and third line.
If there are only two lines, :attr:`~ListItem.secondary_text` will shorten
the text to fit in case it is too long; if a third line is available, it will
instead wrap the text to make use of it.
ListItems with widget containers
--------------------------------
- :class:`~OneLineAvatarListItem`
- :class:`~TwoLineAvatarListItem`
- :class:`~ThreeLineAvatarListItem`
- :class:`~OneLineIconListItem`
- :class:`~TwoLineIconListItem`
- :class:`~ThreeLineIconListItem`
- :class:`~OneLineAvatarIconListItem`
- :class:`~TwoLineAvatarIconListItem`
- :class:`~ThreeLineAvatarIconListItem`
These widgets will take other widgets that inherit from :class:`~ILeftBody`,
:class:`ILeftBodyTouch`, :class:`~IRightBody` or :class:`~IRightBodyTouch` and
put them in their corresponding container.
As the name implies, :class:`~ILeftBody` and :class:`~IRightBody` will signal
that the widget goes into the left or right container, respectively.
:class:`~ILeftBodyTouch` and :class:`~IRightBodyTouch` do the same thing,
except these widgets will also receive touch events that occur within their
surfaces.
Python example:
.. code-block:: python
class ContactPhoto(ILeftBody, AsyncImage):
pass
class MessageButton(IRightBodyTouch, MDIconButton):
phone_number = StringProperty()
def on_release(self):
# sample code:
Dialer.send_sms(phone_number, "Hey! What's up?")
pass
# Sets up ScrollView with MDList, as normally used in Android:
sv = ScrollView()
ml = MDList()
sv.add_widget(ml)
contacts = [
["Annie", "555-24235", "http://myphotos.com/annie.png"],
["Bob", "555-15423", "http://myphotos.com/bob.png"],
["Claire", "555-66098", "http://myphotos.com/claire.png"]
]
for c in contacts:
item = TwoLineAvatarIconListItem(
text=c[0],
secondary_text=c[1]
)
item.add_widget(ContactPhoto(source=c[2]))
            item.add_widget(MessageButton(phone_number=c[1]))
ml.add_widget(item)
API
---
"""
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty, NumericProperty,\
ListProperty, OptionProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
import kivymd.material_resources as m_res
from kivymd.ripplebehavior import RectangularRippleBehavior
from kivymd.theming import ThemableBehavior
from kivymd.font_definitions import theme_font_styles
Builder.load_string('''
#:import m_res kivymd.material_resources
<MDList>
cols: 1
size_hint_y: None
height: self._min_list_height
padding: 0, self._list_vertical_padding
<BaseListItem>
size_hint_y: None
canvas:
Color:
rgba:
self.theme_cls.divider_color if root.divider is not None\
else (0, 0, 0, 0)
Line:
points: (root.x ,root.y, root.x+self.width, root.y)\
if root.divider == 'Full' else\
(root.x+root._txt_left_pad, root.y,\
root.x+self.width-root._txt_left_pad-root._txt_right_pad,\
root.y)
BoxLayout:
id: _text_container
orientation: 'vertical'
pos: root.pos
padding:
root._txt_left_pad, root._txt_top_pad,\
root._txt_right_pad, root._txt_bot_pad
MDLabel:
id: _lbl_primary
text: root.text
font_style: root.font_style
theme_text_color: root.theme_text_color
text_color: root.text_color
size_hint_y: None
height: self.texture_size[1]
markup: True
shorten_from: 'right'
shorten: True
MDLabel:
id: _lbl_secondary
text: '' if root._num_lines == 1 else root.secondary_text
font_style: root.secondary_font_style
theme_text_color: root.secondary_theme_text_color
text_color: root.secondary_text_color
size_hint_y: None
height: 0 if root._num_lines == 1 else self.texture_size[1]
shorten: True if root._num_lines == 2 else False
shorten_from: 'right'
markup: True
<OneLineAvatarListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height/2 - self.height/2
size: dp(40), dp(40)
<ThreeLineAvatarListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height - root._txt_top_pad - self.height - dp(5)
size: dp(40), dp(40)
<OneLineIconListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
<ThreeLineIconListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height - root._txt_top_pad - self.height - dp(5)
size: dp(48), dp(48)
<OneLineRightIconListItem>
BoxLayout:
id: _right_container
size_hint: None, None
x: root.x + root.width - m_res.HORIZ_MARGINS - self.width
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
<ThreeLineRightIconListItem>
BoxLayout:
id: _right_container
size_hint: None, None
x: root.x + root.width - m_res.HORIZ_MARGINS - self.width
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
<OneLineAvatarIconListItem>
BoxLayout:
id: _right_container
size_hint: None, None
x: root.x + root.width - m_res.HORIZ_MARGINS - self.width
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
<TwoLineAvatarIconListItem>
BoxLayout:
id: _right_container
size_hint: None, None
x: root.x + root.width - m_res.HORIZ_MARGINS - self.width
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
<ThreeLineAvatarIconListItem>
BoxLayout:
id: _right_container
size_hint: None, None
x: root.x + root.width - m_res.HORIZ_MARGINS - self.width
y: root.y + root.height - root._txt_top_pad - self.height - dp(5)
size: dp(48), dp(48)
''')
class MDList(GridLayout):
"""ListItem container. Best used in conjunction with a
:class:`kivy.uix.ScrollView`.
When adding (or removing) a widget, it will resize itself to fit its
children, plus top and bottom paddings as described by the MD spec.
"""
selected = ObjectProperty()
_min_list_height = dp(16)
_list_vertical_padding = dp(8)
icon = StringProperty()
def add_widget(self, widget, index=0, canvas=None):
super().add_widget(widget, index, canvas)
self.height += widget.height
def remove_widget(self, widget):
super().remove_widget(widget)
self.height -= widget.height
class BaseListItem(ThemableBehavior, RectangularRippleBehavior,
ButtonBehavior, FloatLayout):
"""Base class to all ListItems. Not supposed to be instantiated on its own.
"""
text = StringProperty()
"""Text shown in the first line.
:attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults
to "".
"""
text_color = ListProperty(None)
""" Text color used if theme_text_color is set to 'Custom' """
font_style = OptionProperty('Subtitle1', options=theme_font_styles)
theme_text_color = StringProperty('Primary', allownone=True)
""" Theme text color for primary text """
secondary_text = StringProperty()
"""Text shown in the second and potentially third line.
The text will wrap into the third line if the ListItem's type is set to
    \'three-line\'. It can be forced into the third line by adding a \\n
escape sequence.
:attr:`secondary_text` is a :class:`~kivy.properties.StringProperty` and
defaults to "".
"""
secondary_text_color = ListProperty(None)
""" Text color used for secondary text if secondary_theme_text_color
is set to 'Custom' """
secondary_theme_text_color = StringProperty('Secondary', allownone=True)
""" Theme text color for secondary primary text """
secondary_font_style = OptionProperty('Body1', options=theme_font_styles)
divider = OptionProperty('Full',
options=['Full', 'Inset', None], allownone=True)
_txt_left_pad = NumericProperty(dp(16))
_txt_top_pad = NumericProperty()
_txt_bot_pad = NumericProperty()
_txt_right_pad = NumericProperty(m_res.HORIZ_MARGINS)
_num_lines = 2
class ILeftBody:
"""Pseudo-interface for widgets that go in the left container for
ListItems that support it.
Implements nothing and requires no implementation, for annotation only.
"""
pass
class ILeftBodyTouch:
"""Same as :class:`~ILeftBody`, but allows the widget to receive touch
events instead of triggering the ListItem's ripple effect
"""
pass
class IRightBody:
"""Pseudo-interface for widgets that go in the right container for
ListItems that support it.
Implements nothing and requires no implementation, for annotation only.
"""
pass
class IRightBodyTouch:
"""Same as :class:`~IRightBody`, but allows the widget to receive touch
events instead of triggering the ListItem's ripple effect
"""
pass
class ContainerSupport:
"""Overrides add_widget in a ListItem to include support for I*Body
    widgets when the appropriate containers are present.
"""
_touchable_widgets = ListProperty()
def add_widget(self, widget, index=0):
if issubclass(widget.__class__, ILeftBody):
self.ids._left_container.add_widget(widget)
elif issubclass(widget.__class__, ILeftBodyTouch):
self.ids._left_container.add_widget(widget)
self._touchable_widgets.append(widget)
elif issubclass(widget.__class__, IRightBody):
self.ids._right_container.add_widget(widget)
elif issubclass(widget.__class__, IRightBodyTouch):
self.ids._right_container.add_widget(widget)
self._touchable_widgets.append(widget)
else:
return super().add_widget(widget)
def remove_widget(self, widget):
super().remove_widget(widget)
if widget in self._touchable_widgets:
self._touchable_widgets.remove(widget)
def on_touch_down(self, touch):
if self.propagate_touch_to_touchable_widgets(touch, 'down'):
return
super().on_touch_down(touch)
def on_touch_move(self, touch, *args):
if self.propagate_touch_to_touchable_widgets(touch, 'move', *args):
return
super().on_touch_move(touch, *args)
def on_touch_up(self, touch):
if self.propagate_touch_to_touchable_widgets(touch, 'up'):
return
super().on_touch_up(touch)
def propagate_touch_to_touchable_widgets(self, touch, touch_event, *args):
triggered = False
for i in self._touchable_widgets:
if i.collide_point(touch.x, touch.y):
triggered = True
if touch_event == 'down':
i.on_touch_down(touch)
elif touch_event == 'move':
i.on_touch_move(touch, *args)
elif touch_event == 'up':
i.on_touch_up(touch)
return triggered
class OneLineListItem(BaseListItem):
"""A one line list item"""
_txt_top_pad = NumericProperty(dp(16))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(48)
class TwoLineListItem(BaseListItem):
"""A two line list item"""
_txt_top_pad = NumericProperty(dp(20))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(72)
class ThreeLineListItem(BaseListItem):
"""A three line list item"""
_txt_top_pad = NumericProperty(dp(16))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 3
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(88)
class OneLineAvatarListItem(ContainerSupport, BaseListItem):
_txt_left_pad = NumericProperty(dp(72))
_txt_top_pad = NumericProperty(dp(20))
_txt_bot_pad = NumericProperty(dp(19)) # dp(24) - dp(5)
_num_lines = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(56)
class TwoLineAvatarListItem(OneLineAvatarListItem):
_txt_top_pad = NumericProperty(dp(20))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(72)
class ThreeLineAvatarListItem(ContainerSupport, ThreeLineListItem):
_txt_left_pad = NumericProperty(dp(72))
class OneLineIconListItem(ContainerSupport, OneLineListItem):
_txt_left_pad = NumericProperty(dp(72))
class TwoLineIconListItem(OneLineIconListItem):
_txt_top_pad = NumericProperty(dp(20))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(72)
class ThreeLineIconListItem(ContainerSupport, ThreeLineListItem):
_txt_left_pad = NumericProperty(dp(72))
class OneLineRightIconListItem(ContainerSupport, OneLineListItem):
# dp(40) = dp(16) + dp(24):
_txt_right_pad = NumericProperty(dp(40) + m_res.HORIZ_MARGINS)
class TwoLineRightIconListItem(OneLineRightIconListItem):
_txt_top_pad = NumericProperty(dp(20))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(72)
class ThreeLineRightIconListItem(ContainerSupport, ThreeLineListItem):
# dp(40) = dp(16) + dp(24):
_txt_right_pad = NumericProperty(dp(40) + m_res.HORIZ_MARGINS)
class OneLineAvatarIconListItem(OneLineAvatarListItem):
# dp(40) = dp(16) + dp(24):
_txt_right_pad = NumericProperty(dp(40) + m_res.HORIZ_MARGINS)
class TwoLineAvatarIconListItem(TwoLineAvatarListItem):
# dp(40) = dp(16) + dp(24):
_txt_right_pad = NumericProperty(dp(40) + m_res.HORIZ_MARGINS)
class ThreeLineAvatarIconListItem(ThreeLineAvatarListItem):
# dp(40) = dp(16) + dp(24):
_txt_right_pad = NumericProperty(dp(40) + m_res.HORIZ_MARGINS)
| 29.897569 | 81 | 0.657976 |
4a1b0447bc6a57ace04a072cae09d8a467840072
| 965 |
py
|
Python
|
sampler2.py
|
benthomasson/zeromq_capnproto_test
|
fc33f85f0c68148060fa78dc78edf7d334e1f791
|
[
"Apache-2.0"
] | null | null | null |
sampler2.py
|
benthomasson/zeromq_capnproto_test
|
fc33f85f0c68148060fa78dc78edf7d334e1f791
|
[
"Apache-2.0"
] | null | null | null |
sampler2.py
|
benthomasson/zeromq_capnproto_test
|
fc33f85f0c68148060fa78dc78edf7d334e1f791
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import capnp # noqa
import measurements_capnp
import psutil
import zmq
import time
import datetime
from itertools import count
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5559")
counter = count(0)
def sample():
timestamp = datetime.datetime.utcnow().isoformat()
sample = measurements_capnp.Measurements.new_message()
sample.timestamp = timestamp
sample.id = next(counter)
sample.cpu.cpu = psutil.cpu_percent()
mem = psutil.virtual_memory()
sample.memory.total = mem.total
sample.memory.available = mem.available
sample.memory.percent = mem.percent
sample.memory.used = mem.used
sample.memory.free = mem.free
sample.memory.active = mem.active
sample.memory.inactive = mem.inactive
socket.send_multipart([b"Measurements", sample.to_bytes()])
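def _example_subscriber():
    # A minimal receiving-side sketch (assumption: run from a separate
    # process); it mirrors the PUB socket above and decodes the capnp payload.
    sub_context = zmq.Context()
    sub = sub_context.socket(zmq.SUB)
    sub.connect("tcp://localhost:5559")
    sub.setsockopt(zmq.SUBSCRIBE, b"Measurements")
    topic, payload = sub.recv_multipart()
    msg = measurements_capnp.Measurements.from_bytes(payload)
    print(topic, msg.timestamp, msg.cpu.cpu)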
while True:
sample()
time.sleep(0.1)
| 24.74359 | 63 | 0.719171 |
4a1b0551137d80a5c0e2d4758d79fcdf2021ba87
| 535 |
py
|
Python
|
django/djangoproject/burger_restaurant/migrations/0016_alter_order_order_status.py
|
Mango-Smoothie/Burger_Restaurant
|
858ac08d0791a536993ce253c6741b4b073d4e84
|
[
"MIT"
] | 1 |
2021-11-30T14:13:41.000Z
|
2021-11-30T14:13:41.000Z
|
django/djangoproject/burger_restaurant/migrations/0016_alter_order_order_status.py
|
Mango-Smoothie/Burger_Restaurant
|
858ac08d0791a536993ce253c6741b4b073d4e84
|
[
"MIT"
] | null | null | null |
django/djangoproject/burger_restaurant/migrations/0016_alter_order_order_status.py
|
Mango-Smoothie/Burger_Restaurant
|
858ac08d0791a536993ce253c6741b4b073d4e84
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-29 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('burger_restaurant', '0015_alter_customer_options'),
]
operations = [
migrations.AlterField(
model_name='order',
name='order_status',
field=models.CharField(choices=[('Not Started', 'Not Started'), ('In Progress', 'In Progress'), ('Finished', 'Finished')], max_length=100, verbose_name='Order Status'),
),
]
| 28.157895 | 180 | 0.629907 |
4a1b05ca6d1001e5da9f3754ccd44ddc9731f7b1
| 1,926 |
py
|
Python
|
tests/test_init_extra.py
|
google/evojax
|
d5b450b1834683e8b2196d081d8602fc14f262d9
|
[
"Apache-2.0"
] | 365 |
2022-02-08T07:41:33.000Z
|
2022-03-31T23:35:35.000Z
|
tests/test_init_extra.py
|
google/evojax
|
d5b450b1834683e8b2196d081d8602fc14f262d9
|
[
"Apache-2.0"
] | 16 |
2022-02-13T11:29:53.000Z
|
2022-03-31T11:00:34.000Z
|
tests/test_init_extra.py
|
google/evojax
|
d5b450b1834683e8b2196d081d8602fc14f262d9
|
[
"Apache-2.0"
] | 24 |
2022-02-11T04:19:35.000Z
|
2022-03-15T02:44:35.000Z
|
# Copyright 2022 The EvoJAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TestTask:
def test_mnist(self):
import sys
if sys.version_info.major == 3 and sys.version_info.minor <= 9:
# python<=3.9, required by the optional torchvision (see https://pypi.org/project/torchvision/)
from evojax.task.mnist import MNIST
_ = MNIST()
assert True
def test_mdkp(self):
from evojax.task.mdkp import MDKP
_ = MDKP()
assert True
class TestPolicy:
pass
class TestAlgo:
def test_cma(self):
from evojax.algo import CMA
_ = CMA(pop_size=16, param_size=16)
assert True
def test_simple_ga(self):
from evojax.algo import SimpleGA
_ = SimpleGA(pop_size=16, param_size=16)
assert True
def test_open_es(self):
import sys
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
# python>=3.7, required by the optional evosax
from evojax.algo import OpenES
_ = OpenES(pop_size=16, param_size=16)
assert True
def test_ars(self):
import sys
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
# python>=3.7, required by the optional evosax
from evojax.algo import ARS
_ = ARS(pop_size=16, param_size=16)
assert True
| 31.064516 | 107 | 0.646417 |
4a1b0632906bc73b224de08cf337f803b06ec06a
| 263 |
py
|
Python
|
django_fastapi/api/admin.py
|
sfcol/fast-api-django
|
849f6055b580024f409a6a6e215284f818d57323
|
[
"MIT"
] | null | null | null |
django_fastapi/api/admin.py
|
sfcol/fast-api-django
|
849f6055b580024f409a6a6e215284f818d57323
|
[
"MIT"
] | null | null | null |
django_fastapi/api/admin.py
|
sfcol/fast-api-django
|
849f6055b580024f409a6a6e215284f818d57323
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from api.models import Item , User
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ("id", "title")
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ("id", "name", "email")
| 21.916667 | 42 | 0.718631 |
4a1b06b1db20c4fff4a19d1079b2dc471f00817e
| 4,793 |
py
|
Python
|
kubernetes/client/models/v1_label_selector.py
|
craigtracey/python
|
177564c655c0ea3f9cf38e61ca275ef6c6256aab
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_label_selector.py
|
craigtracey/python
|
177564c655c0ea3f9cf38e61ca275ef6c6256aab
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_label_selector.py
|
craigtracey/python
|
177564c655c0ea3f9cf38e61ca275ef6c6256aab
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LabelSelector(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'match_expressions': 'list[V1LabelSelectorRequirement]',
'match_labels': 'dict(str, str)'
}
attribute_map = {
'match_expressions': 'matchExpressions',
'match_labels': 'matchLabels'
}
def __init__(self, match_expressions=None, match_labels=None):
"""
V1LabelSelector - a model defined in Swagger
"""
self._match_expressions = None
self._match_labels = None
self.discriminator = None
if match_expressions is not None:
self.match_expressions = match_expressions
if match_labels is not None:
self.match_labels = match_labels
@property
def match_expressions(self):
"""
Gets the match_expressions of this V1LabelSelector.
matchExpressions is a list of label selector requirements. The requirements are ANDed.
:return: The match_expressions of this V1LabelSelector.
:rtype: list[V1LabelSelectorRequirement]
"""
return self._match_expressions
@match_expressions.setter
def match_expressions(self, match_expressions):
"""
Sets the match_expressions of this V1LabelSelector.
matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param match_expressions: The match_expressions of this V1LabelSelector.
:type: list[V1LabelSelectorRequirement]
"""
self._match_expressions = match_expressions
@property
def match_labels(self):
"""
Gets the match_labels of this V1LabelSelector.
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.
:return: The match_labels of this V1LabelSelector.
:rtype: dict(str, str)
"""
return self._match_labels
@match_labels.setter
def match_labels(self, match_labels):
"""
Sets the match_labels of this V1LabelSelector.
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.
:param match_labels: The match_labels of this V1LabelSelector.
:type: dict(str, str)
"""
self._match_labels = match_labels
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1LabelSelector):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
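# A minimal construction sketch with illustrative values only: a selector
# equivalent to `matchLabels: {app: web}`, inspected via to_dict().
def _example_selector_dict():
    selector = V1LabelSelector(match_labels={"app": "web"})
    # to_dict() keys follow the python attribute names from swagger_types.
    return selector.to_dict()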
| 30.922581 | 269 | 0.606718 |
4a1b08b538c2c588b39e1f8899c4466f64b04ffe
| 9,525 |
py
|
Python
|
xesmf/tests/test_backend.py
|
andersy005/xESMF
|
3b23a7ee6b5a580882bd62cd1ef5bf7062f4ba4d
|
[
"MIT"
] | 1 |
2020-11-07T18:55:41.000Z
|
2020-11-07T18:55:41.000Z
|
xesmf/tests/test_backend.py
|
andersy005/xESMF
|
3b23a7ee6b5a580882bd62cd1ef5bf7062f4ba4d
|
[
"MIT"
] | null | null | null |
xesmf/tests/test_backend.py
|
andersy005/xESMF
|
3b23a7ee6b5a580882bd62cd1ef5bf7062f4ba4d
|
[
"MIT"
] | null | null | null |
import os
import ESMF
import numpy as np
import pytest
import xarray as xr
from numpy.testing import assert_almost_equal, assert_equal
import xesmf as xe
from xesmf.backend import (
add_corner,
esmf_grid,
esmf_locstream,
esmf_regrid_apply,
esmf_regrid_build,
esmf_regrid_finalize,
warn_f_contiguous,
warn_lat_range,
)
from xesmf.smm import apply_weights, read_weights
# We use pure numpy arrays to test backend
# xarray DataSet is only used at the very beginning as a quick way to make data
coord_names = ['lon', 'lat', 'lon_b', 'lat_b']
ds_in = xe.util.grid_global(20, 12)
lon_in, lat_in, lon_b_in, lat_b_in = [ds_in[name].values for name in coord_names]
ds_out = xe.util.grid_global(15, 9)
lon_out, lat_out, lon_b_out, lat_b_out = [ds_out[name].values for name in coord_names]
# shortcut to test a single grid
lon, lat, lon_b, lat_b = [lon_in, lat_in, lon_b_in, lat_b_in]
# input test data
ds_in['data'] = xe.data.wave_smooth(ds_in['lon'], ds_in['lat'])
data_in = ds_in['data'].values
# reference output data, calculated analytically
ds_out['data_ref'] = xe.data.wave_smooth(ds_out['lon'], ds_out['lat'])
data_ref = ds_out['data_ref'].values
# 4D data to test broadcasting, increasing linearly with time and lev
ds_in.coords['time'] = np.arange(1, 11)
ds_in.coords['lev'] = np.arange(1, 51)
ds_in['data4D'] = ds_in['time'] * ds_in['lev'] * ds_in['data']
data4D_in = ds_in['data4D'].values
def test_warn_f_on_array():
a = np.zeros([2, 2], order='C')
with pytest.warns(UserWarning):
warn_f_contiguous(a)
def test_warn_f_on_grid():
# should throw a warning if not passing transpose
with pytest.warns(UserWarning):
esmf_grid(lon, lat)
def test_warn_lat_range():
# latitude goes to -100 (invalid value)
ds_temp = xe.util.grid_2d(-180, 180, 10, -100, 90, 5)
with pytest.warns(UserWarning):
warn_lat_range(ds_temp['lat'].values)
with pytest.warns(UserWarning):
warn_lat_range(ds_temp['lat_b'].values)
def test_esmf_grid_with_corner():
# only center coordinate, no corners
# remember to pass transpose (F-ordered) to backend
grid = esmf_grid(lon.T, lat.T)
# make sure coordinate values agree
assert_equal(grid.coords[0][0], lon.T)
assert_equal(grid.coords[0][1], lat.T)
# make sure meta data agree
assert not grid.has_corners # no corner yet!
assert grid.staggerloc == [True, False, False, False]
assert grid.coord_sys is ESMF.CoordSys.SPH_DEG
assert grid.rank == 2
assert_equal(grid.size[0], lon.T.shape)
assert_equal(grid.upper_bounds[0], lon.T.shape)
assert_equal(grid.lower_bounds[0], np.array([0, 0]))
# now add corner information
add_corner(grid, lon_b.T, lat_b.T)
# coordinate values
assert_equal(grid.coords[3][0], lon_b.T)
assert_equal(grid.coords[3][1], lat_b.T)
# metadata
assert grid.has_corners # should have corner now
assert grid.staggerloc == [True, False, False, True]
assert_equal(grid.size[3], lon_b.T.shape)
assert_equal(grid.upper_bounds[3], lon_b.T.shape)
assert_equal(grid.lower_bounds[3], np.array([0, 0]))
def test_esmf_build_bilinear():
grid_in = esmf_grid(lon_in.T, lat_in.T)
grid_out = esmf_grid(lon_out.T, lat_out.T)
regrid = esmf_regrid_build(grid_in, grid_out, 'bilinear')
assert regrid.unmapped_action is ESMF.UnmappedAction.IGNORE
assert regrid.regrid_method is ESMF.RegridMethod.BILINEAR
# they should share the same memory
    assert regrid.srcfield.grid is grid_in
    assert regrid.dstfield.grid is grid_out
esmf_regrid_finalize(regrid)
def test_esmf_extrapolation():
grid_in = esmf_grid(lon_in.T, lat_in.T)
grid_out = esmf_grid(lon_out.T, lat_out.T)
regrid = esmf_regrid_build(grid_in, grid_out, 'bilinear')
data_out_esmpy = esmf_regrid_apply(regrid, data_in.T).T
# without extrapolation, the first and last lines/columns = 0
assert data_out_esmpy[0, 0] == 0
regrid = esmf_regrid_build(
grid_in,
grid_out,
'bilinear',
extrap_method='inverse_dist',
extrap_num_src_pnts=3,
extrap_dist_exponent=1,
)
data_out_esmpy = esmf_regrid_apply(regrid, data_in.T).T
# the 3 closest points in data_in are 2.010, 2.005, and 1.992. The result should be roughly equal to 2.0
assert np.round(data_out_esmpy[0, 0], 1) == 2.0
def test_regrid():
# use conservative regridding as an example,
# since it is the most well-tested studied one in papers
# TODO: possible to break this long test into smaller tests?
# not easy due to strong dependencies.
grid_in = esmf_grid(lon_in.T, lat_in.T)
grid_out = esmf_grid(lon_out.T, lat_out.T)
# no corner info yet, should not be able to use conservative
with pytest.raises(ValueError):
esmf_regrid_build(grid_in, grid_out, 'conservative')
# now add corners
add_corner(grid_in, lon_b_in.T, lat_b_in.T)
add_corner(grid_out, lon_b_out.T, lat_b_out.T)
# also write to file for scipy regridding
filename = 'test_weights.nc'
if os.path.exists(filename):
os.remove(filename)
regrid = esmf_regrid_build(grid_in, grid_out, 'conservative', filename=filename)
assert regrid.regrid_method is ESMF.RegridMethod.CONSERVE
# apply regridding using ESMPy's native method
data_out_esmpy = esmf_regrid_apply(regrid, data_in.T).T
rel_err = (data_out_esmpy - data_ref) / data_ref # relative error
assert np.max(np.abs(rel_err)) < 0.05
# apply regridding using scipy
weights = read_weights(filename, lon_in.size, lon_out.size)
shape_in = lon_in.shape
shape_out = lon_out.shape
data_out_scipy = apply_weights(weights, data_in, shape_in, shape_out)
# must be almost exactly the same as esmpy's result!
assert_almost_equal(data_out_scipy, data_out_esmpy)
# finally, test broadcasting with scipy
# TODO: need to test broadcasting with ESMPy backend?
# We only use Scipy in frontend, and ESMPy is just for backend benchmark
# However, it is useful to compare performance and show scipy is 3x faster
data4D_out = apply_weights(weights, data4D_in, shape_in, shape_out)
# data over broadcasting dimensions should agree
assert_almost_equal(data4D_in.mean(axis=(2, 3)), data4D_out.mean(axis=(2, 3)), decimal=10)
# clean-up
esmf_regrid_finalize(regrid)
os.remove(filename)
def test_regrid_periodic_wrong():
# not using periodic grid
grid_in = esmf_grid(lon_in.T, lat_in.T)
grid_out = esmf_grid(lon_out.T, lat_out.T)
assert grid_in.num_peri_dims == 0
assert grid_in.periodic_dim is None
regrid = esmf_regrid_build(grid_in, grid_out, 'bilinear')
data_out_esmpy = esmf_regrid_apply(regrid, data_in.T).T
rel_err = (data_out_esmpy - data_ref) / data_ref # relative error
assert np.max(np.abs(rel_err)) == 1.0 # some data will be missing
# clean-up
esmf_regrid_finalize(regrid)
def test_regrid_periodic_correct():
# only need to specific periodic for input grid
grid_in = esmf_grid(lon_in.T, lat_in.T, periodic=True)
grid_out = esmf_grid(lon_out.T, lat_out.T)
assert grid_in.num_peri_dims == 1
assert grid_in.periodic_dim == 0 # the first axis, longitude
regrid = esmf_regrid_build(grid_in, grid_out, 'bilinear')
data_out_esmpy = esmf_regrid_apply(regrid, data_in.T).T
rel_err = (data_out_esmpy - data_ref) / data_ref # relative error
assert np.max(np.abs(rel_err)) < 0.065
# clean-up
esmf_regrid_finalize(regrid)
def test_esmf_locstream():
lon = np.arange(5)
lat = np.arange(5)
ls = esmf_locstream(lon, lat)
assert isinstance(ls, ESMF.LocStream)
lon2d, lat2d = np.meshgrid(lon, lat)
with pytest.raises(ValueError):
ls = esmf_locstream(lon2d, lat2d)
with pytest.raises(ValueError):
ls = esmf_locstream(lon, lat2d)
with pytest.raises(ValueError):
ls = esmf_locstream(lon2d, lat)
grid_in = esmf_grid(lon_in.T, lat_in.T, periodic=True)
esmf_regrid_build(grid_in, ls, 'bilinear')
esmf_regrid_build(ls, grid_in, 'nearest_s2d')
def test_read_weights(tmp_path):
fn = tmp_path / 'weights.nc'
grid_in = esmf_grid(lon_in.T, lat_in.T)
grid_out = esmf_grid(lon_out.T, lat_out.T)
regrid_memory = esmf_regrid_build(grid_in, grid_out, method='bilinear')
esmf_regrid_build(grid_in, grid_out, method='bilinear', filename=str(fn))
w = regrid_memory.get_weights_dict(deep_copy=True)
sm = read_weights(w, lon_in.size, lon_out.size)
# Test Path and string to netCDF file against weights dictionary
np.testing.assert_array_equal(
read_weights(fn, lon_in.size, lon_out.size).todense(), sm.todense()
)
np.testing.assert_array_equal(
read_weights(str(fn), lon_in.size, lon_out.size).todense(), sm.todense()
)
# Test xr.Dataset
np.testing.assert_array_equal(
read_weights(xr.open_dataset(fn), lon_in.size, lon_out.size).todense(),
sm.todense(),
)
# Test COO matrix
np.testing.assert_array_equal(
read_weights(sm, lon_in.size, lon_out.size).todense(), sm.todense()
)
# Test failures
with pytest.raises(IOError):
read_weights(tmp_path / 'wrong_file.nc', lon_in.size, lon_out.size)
with pytest.raises(ValueError):
read_weights({}, lon_in.size, lon_out.size)
with pytest.raises(ValueError):
ds = xr.open_dataset(fn)
read_weights(ds.drop_vars('col'), lon_in.size, lon_out.size)
| 31.963087 | 108 | 0.704147 |
4a1b08c8b936516598b7726342a6673b22bfe56c
| 906 |
py
|
Python
|
tests/core/full_node/ram_db.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | null | null | null |
tests/core/full_node/ram_db.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | null | null | null |
tests/core/full_node/ram_db.py
|
Tony4467/littlelambocoin-blockchain
|
3d4f2b577cd5a2feb324fca50e0981a728583aee
|
[
"Apache-2.0"
] | null | null | null |
from typing import Tuple
import aiosqlite
from littlelambocoin.consensus.blockchain import Blockchain
from littlelambocoin.consensus.constants import ConsensusConstants
from littlelambocoin.full_node.block_store import BlockStore
from littlelambocoin.full_node.coin_store import CoinStore
from littlelambocoin.full_node.hint_store import HintStore
from littlelambocoin.util.db_wrapper import DBWrapper
async def create_ram_blockchain(consensus_constants: ConsensusConstants) -> Tuple[aiosqlite.Connection, Blockchain]:
connection = await aiosqlite.connect(":memory:")
db_wrapper = DBWrapper(connection)
block_store = await BlockStore.create(db_wrapper)
coin_store = await CoinStore.create(db_wrapper)
hint_store = await HintStore.create(db_wrapper)
blockchain = await Blockchain.create(coin_store, block_store, consensus_constants, hint_store)
return connection, blockchain
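# A minimal usage sketch, assuming it is awaited from an async test with a
# valid ConsensusConstants instance; remember to close the connection.
async def example_usage(consensus_constants: ConsensusConstants) -> None:
    connection, blockchain = await create_ram_blockchain(consensus_constants)
    try:
        pass  # exercise the in-memory Blockchain here, e.g. add test blocks
    finally:
        await connection.close()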
| 43.142857 | 116 | 0.833333 |
4a1b08de3dff2447956a0e48d01430ec0adb024f
| 3,044 |
py
|
Python
|
train.py
|
Stanford-ILIAD/RL_Routing
|
309beadebad84ec46fdad75b688848e36f10b214
|
[
"MIT"
] | 4 |
2021-07-23T01:14:36.000Z
|
2022-03-05T06:24:57.000Z
|
train.py
|
Stanford-ILIAD/RL_Routing
|
309beadebad84ec46fdad75b688848e36f10b214
|
[
"MIT"
] | null | null | null |
train.py
|
Stanford-ILIAD/RL_Routing
|
309beadebad84ec46fdad75b688848e36f10b214
|
[
"MIT"
] | 1 |
2021-10-04T20:38:17.000Z
|
2021-10-04T20:38:17.000Z
|
import os
from baselines.common import tf_util as U
from baselines import logger
from baselines.common import set_global_seeds
import gym
import gym_trafficnetwork
from mpi4py import MPI
from baselines.bench import Monitor
sim_duration = 5.0 # hours
network_type = 'multiOD' # type 'parallel' or 'general' or 'multiOD'
P = 3 # number of paths (only for parallel -- the general network graph is defined inside its environment file)
accident_param = 0.6 # expected number of accidents in 1 hour
def train(env, seed, model_path=None):
from baselines.ppo1 import mlp_policy, pposgd_simple
U.make_session(num_cpu=1).__enter__()
def policy_fn(name, ob_space, ac_space):
return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=256, num_hid_layers=2)
rank = MPI.COMM_WORLD.Get_rank()
workerseed = seed + 10000 * rank if seed is not None else None
set_global_seeds(workerseed)
env_max_step_size = env.max_step_size
env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
env.seed(workerseed)
pi = pposgd_simple.learn(env, policy_fn,
max_timesteps=4e7,
timesteps_per_actorbatch=4*env_max_step_size, # we used 32 CPUs
clip_param=0.2, entcoeff=0.005,
optim_epochs=5,
optim_stepsize=3e-4,
optim_batchsize=64,
gamma=0.99,
lam=0.95,
schedule='linear',
)
env.close()
if model_path:
U.save_state(model_path)
return pi
def main():
logger.configure()
if network_type.lower() == 'parallel':
filename = 'ParallelNetworkP' + str(P) + 'Accidents' + str(1 if accident_param > 0 else 0)
env = gym.make('ParallelNetwork-v0')
elif network_type.lower() == 'general':
filename = 'GeneralNetworkAccidents' + str(1 if accident_param > 0 else 0)
env = gym.make('GeneralNetwork-v0')
elif network_type.lower() == 'multiod':
filename = 'GeneralNetworkMultiODAccidents' + str(1 if accident_param > 0 else 0)
env = gym.make('GeneralNetworkMultiOD-v0')
else:
assert False, 'network_type is invalid.'
model_path = os.path.join('trained_models', filename)
env.set('sim_duration', sim_duration) # hours
env.set('start_empty', False)
env.set('start_from_equilibrium', False)
if network_type.lower() == 'parallel': env.set('P', P)
env.set('init_learn_rate', 0.5)
env.set('constant_learn_rate', True)
env.set('accident_param', accident_param) # expected number of accidents in 1 hour
env.set('demand', [[[0.346,0.519],[0.346,0.519]],[[0.346,0.519],[0.346,0.519]]]) # human-driven and autonomous cars per second, respectively
env.set('demand_noise_std', [[[0.0346,0.0519],[0.0346,0.0519]],[[0.0346,0.0519],[0.0346,0.0519]]]) # human-driven and autonomous cars per second, respectively
# train the model
train(env, seed=None, model_path=model_path)
if __name__ == '__main__':
main()
| 39.532468 | 162 | 0.672799 |
4a1b095a3afe086c3d2da056a45356bf53b3d58d
| 166 |
py
|
Python
|
habr_art_stat/exceptins.py
|
nikolnikon/otus_web_habr_art_stats
|
c53d03a481e666229ad998ee3f488d69972db820
|
[
"MIT"
] | null | null | null |
habr_art_stat/exceptins.py
|
nikolnikon/otus_web_habr_art_stats
|
c53d03a481e666229ad998ee3f488d69972db820
|
[
"MIT"
] | null | null | null |
habr_art_stat/exceptins.py
|
nikolnikon/otus_web_habr_art_stats
|
c53d03a481e666229ad998ee3f488d69972db820
|
[
"MIT"
] | null | null | null |
class ParseError(Exception):
def __init__(self, message=None):
super(ParseError, self).__init__('Ошибка при парсинге сайта habr.com. {}'.format(message))
| 41.5 | 98 | 0.716867 |
4a1b0babd7102c80c9c56c4f69d8a858ed32af90
| 1,211 |
py
|
Python
|
A_Structured_Self-attentive_Sentence_Embedding_cls/build_vocab.py
|
aisolab/nlp_implementation
|
21ea6e3f5737e7074bdd8dd190e5f5172f86f6bf
|
[
"MIT"
] | 181 |
2019-03-11T11:55:17.000Z
|
2020-05-25T04:50:21.000Z
|
A_Structured_Self-attentive_Sentence_Embedding_cls/build_vocab.py
|
aisolab/nlp_implementation
|
21ea6e3f5737e7074bdd8dd190e5f5172f86f6bf
|
[
"MIT"
] | 13 |
2019-06-28T02:39:20.000Z
|
2020-05-12T07:10:14.000Z
|
Convolutional_Neural_Networks_for_Sentence_Classification/build_vocab.py
|
aisolab/nlp_implementation
|
21ea6e3f5737e7074bdd8dd190e5f5172f86f6bf
|
[
"MIT"
] | 38 |
2019-03-13T00:57:28.000Z
|
2020-05-21T09:45:03.000Z
|
import itertools
import pickle
import gluonnlp as nlp
import pandas as pd
from pathlib import Path
from model.utils import Vocab
from model.split import split_morphs
from utils import Config
from collections import Counter
# loading dataset
nsmc_dir = Path("nsmc")
config = Config("conf/dataset/nsmc.json")
tr = pd.read_csv(config.train, sep="\t").loc[:, ["document", "label"]]
# extracting morph in sentences
list_of_tokens = tr["document"].apply(split_morphs).tolist()
# generating the vocab
token_counter = Counter(itertools.chain.from_iterable(list_of_tokens))
tmp_vocab = nlp.Vocab(
counter=token_counter, min_freq=10, bos_token=None, eos_token=None
)
# connecting SISG embedding with vocab
ptr_embedding = nlp.embedding.create("fasttext", source="wiki.ko")
tmp_vocab.set_embedding(ptr_embedding)
array = tmp_vocab.embedding.idx_to_vec.asnumpy()
vocab = Vocab(
tmp_vocab.idx_to_token,
padding_token="<pad>",
unknown_token="<unk>",
bos_token=None,
eos_token=None,
)
vocab.embedding = array
# saving vocab
with open(nsmc_dir / "vocab.pkl", mode="wb") as io:
pickle.dump(vocab, io)
config.update({"vocab": str(nsmc_dir / "vocab.pkl")})
config.save("conf/dataset/nsmc.json")
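# A minimal read-back sketch, assuming the pickle written above; unpickling
# needs model.utils.Vocab importable, which this script already ensures.
def load_vocab(path=nsmc_dir / "vocab.pkl"):
    with open(path, mode="rb") as io:
        loaded = pickle.load(io)
    # loaded.embedding is the fastText-initialized numpy array attached above.
    return loaded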
| 26.911111 | 70 | 0.752271 |
4a1b0c5694ff2e41ac0bbffcd75eaa23ba0d26a3
| 4,551 |
py
|
Python
|
src/modeline.py
|
tweakoz/zed64
|
c0231444418999191182d53d9319bf7978422bfb
|
[
"CC-BY-3.0"
] | 4 |
2015-06-04T01:14:43.000Z
|
2018-06-16T05:45:57.000Z
|
src/modeline.py
|
tweakoz/zed64
|
c0231444418999191182d53d9319bf7978422bfb
|
[
"CC-BY-3.0"
] | null | null | null |
src/modeline.py
|
tweakoz/zed64
|
c0231444418999191182d53d9319bf7978422bfb
|
[
"CC-BY-3.0"
] | null | null | null |
import os, sys
from myhdl import *
from stdlib import *
###########################################################
# pixelclocks
###########################################################
# pixel_clocks[6], // 148.5
# pixel_clocks[5], // 74.25
# pixel_clocks[4], // 108mhz
# pixel_clocks[3], // 40
# pixel_clocks[2], // 36
# pixel_clocks[1], // 27
# pixel_clocks[0], // 13.5
###########################################################
# mode lines
###########################################################
#Modeline syntax:
#dotclk(mhz)
#hdisp hsyncstart hsyncend htotal
#vdisp vsyncstart vsyncend vtotal
#[flags]
###########################################################
# HDTV works on sony
###########################################################
#ModeLine "1920x1080" 148.50 1920 2008 2052 2200 1080 1084 1088 1125 -HSync -VSync
#ModeLine "ATSC-720-60p" 74.25 1280 1320 1376 1650 720 722 728 750
mode_1080p = ( "1080p", 6, # 148.5
1920,2008,2052,2200,True,
1080,1084,1088,1125,True )
mode_720p = ( "720p", 5, # 74.25
1280,1320,1376,1650,False,
720,722,728,750,False )
###########################################################
# VESA works on sony
###########################################################
#ModeLine "640x480 85hz" 36.00 640 696 752 832 480 481 484 509 -HSync -VSync
#ModeLine "720x480" 27.00 720 736 798 858 480 489 495 525 -HSync -VSync
#ModeLine "800x600" 40.00 800 840 968 1056 600 601 605 628 +HSync +VSync
#ModeLine "1280x1024" 108.00 1280 1328 1440 1688 1024 1025 1028 1066 +HSync +VSync
mode_320t = ( "320test", 2, # 36,
320,324,328,332,True,
240,242,244,248,True )
mode_vga85 = ( "vga85", 2, # 36,
640,696,752,832,True,
480,481,484,509,True )
mode_ntscp = ( "ntscp", 1, # 27
720,736,798,858,True,
480,489,495,525,True )
mmode_svga60 = ( "svga60", 3, # 40
800,840,968,1056,False,
600,601,605,628,False )
mode_1024p = ( "1024p", 4, # 108
1280,1328,1440,1688,False,
1024,1025,1028,1066,False )
mode_test = ( "test", 4, # 108
128,130,132,134,False,
128,136,144,152,False )
modelist = [ mode_ntscp, mmode_svga60, mode_720p, mode_1024p ]
#modelist = [ mode_test, mode_test, mode_test, mode_test ]
###########################################################
mode_hsi,mode_hdisp,mode_hstart,mode_hend,mode_htot = [],[],[],[],[]
mode_vsi,mode_vdisp,mode_vstart,mode_vend,mode_vtot = [],[],[],[],[]
###########################################################
for item in modelist:
name = item[0]
clockid = item[1]
mode_hdisp.append( item[2] )
mode_hstart.append( item[3] )
mode_hend.append( item[4] )
mode_htot.append( item[5] )
mode_hsi.append( item[6] )
mode_vdisp.append( item[7] )
mode_vstart.append( item[8] )
mode_vend.append( item[9] )
mode_vtot.append( item[10] )
mode_vsi.append( item[11] )
###########################################################
def rom(dout, addr, contents):
@always_comb
def read():
dout.next = contents[int(addr)]
return read
###########################################################
def verout(vparams,name,obj,dwidth,awidth,contents):
dout = Signal(intbv(0)[dwidth:])
addr = Signal(intbv(0)[awidth:])
ios = vparams.ios
outfolder = vparams.outfolder
toVerilog.name = name
toVerilog.timescale = vparams.timescale
veri_inst = toVerilog(rom, dout, addr, contents )
os.system("mv %s.v %s/" % (name,outfolder) )
os.system("mv tb_%s.v %s/" % (name,outfolder) )
###########################################################
def gen_verilog( vparams ):
verout( vparams, "modeline_hsi", rom, 1, 2, tuple(mode_hsi) )
verout( vparams, "modeline_hdisp", rom, 12, 2, tuple(mode_hdisp) )
verout( vparams, "modeline_hstart", rom, 12, 2, tuple(mode_hstart) )
verout( vparams, "modeline_hend", rom, 12, 2, tuple(mode_hend) )
verout( vparams, "modeline_htot", rom, 12, 2, tuple(mode_htot) )
verout( vparams, "modeline_vsi", rom, 1, 2, tuple(mode_vsi) )
verout( vparams, "modeline_vdisp", rom, 12, 2, tuple(mode_vdisp) )
verout( vparams, "modeline_vstart", rom, 12, 2, tuple(mode_vstart) )
verout( vparams, "modeline_vend", rom, 12, 2, tuple(mode_vend) )
verout( vparams, "modeline_vtot", rom, 12, 2, tuple(mode_vtot) )
###########################################################
| 32.049296 | 82 | 0.511536 |
4a1b0da87656dc787def3e0421d400d2a72e7c03
| 3,774 |
py
|
Python
|
core/confdb/syntax/virtualrouter/protocols/vrrp/base.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84 |
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
core/confdb/syntax/virtualrouter/protocols/vrrp/base.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22 |
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
core/confdb/syntax/virtualrouter/protocols/vrrp/base.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23 |
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ----------------------------------------------------------------------
# ConfDB virtual-router <name> protocols vrrp syntax
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from ....defs import DEF
from ....patterns import ANY, INTEGER, IP_ADDRESS, UNIT_NAME, BOOL
VRRP_SYNTAX = DEF(
"vrrp",
[
DEF(
"group",
[
DEF(
ANY,
[
DEF("description", [DEF(ANY, name="description", gen="make_vrrp_group")]),
DEF(
"virtual-address",
[
DEF(
"inet",
[DEF(IP_ADDRESS, name="address", gen="make_vrrp_address")],
),
DEF(
"inet6",
[DEF(IP_ADDRESS, name="address", gen="make_vrrp_address6")],
),
],
),
DEF(
"interface",
[DEF(UNIT_NAME, name="interface", gen="make_vrrp_interface")],
),
DEF("priority", [DEF(INTEGER, name="priority", gen="make_vrrp_priority")]),
DEF(
"authentication",
[
DEF(
"plain-text",
[
DEF(
"key",
[DEF(ANY, name="key", gen="make_vrrp_plain_key")],
)
],
),
DEF(
"md5",
[DEF("key", [DEF(ANY, name="key", gen="make_vrrp_md5_key")])],
),
],
),
DEF(
"timers",
[
DEF(
"advertise-interval",
[
DEF(
INTEGER,
name="interval",
gen="make_vrrp_advertise_interval",
)
],
)
],
),
DEF(
"preempt",
[
DEF(
"enabled", [DEF(BOOL, name="enabled", gen="make_vrrp_preempt")]
),
DEF(
"timer",
[DEF(INTEGER, name="timer", gen="make_vrrp_preempt_timer")],
),
],
),
],
name="group",
multi=True,
required=True,
)
],
required=True,
)
],
)
| 39.726316 | 99 | 0.222576 |
4a1b0dcc53e4c376a7325c4a14a7d8d4bfe1cd94
| 9,927 |
py
|
Python
|
Minesweeper.py
|
LuizHenriquePy/Minesweeper
|
23e4d3a2bcb6ef6c0a05a14bd77ab66284c6a568
|
[
"MIT"
] | null | null | null |
Minesweeper.py
|
LuizHenriquePy/Minesweeper
|
23e4d3a2bcb6ef6c0a05a14bd77ab66284c6a568
|
[
"MIT"
] | null | null | null |
Minesweeper.py
|
LuizHenriquePy/Minesweeper
|
23e4d3a2bcb6ef6c0a05a14bd77ab66284c6a568
|
[
"MIT"
] | null | null | null |
from random import randint
from tkinter import *
from tkinter.messagebox import showinfo
from functools import partial
NEIGHBORS = [
lambda x, y: (x - 1, y - 1), # top left
lambda x, y: (x - 1, y), # top
lambda x, y: (x - 1, y + 1), # top right
lambda x, y: (x, y - 1), # left
lambda x, y: (x, y + 1), # right
lambda x, y: (x + 1, y - 1), # bottom left
lambda x, y: (x + 1, y), # bottom
lambda x, y: (x + 1, y + 1) # bottom right
]
class Matrix:
def __init__(self, numberOfRows, numberOfColumns, numberOfMines):
self.numberOfRows = numberOfRows
self.numberOfColumns = numberOfColumns
self.numberOfMines = numberOfMines
self.neighbors = NEIGHBORS
def creates_the_matrix(self):
self.matrix = [[0 for x in range(self.numberOfColumns)] for x in range(self.numberOfRows)]
def put_mines_in_the_matrix(self):
while True:
self.minePositions = []
self.creates_the_matrix()
while len(self.minePositions) != self.numberOfMines:
minePosition = [randint(0, self.numberOfRows - 1), randint(0, self.numberOfColumns - 1)]
if minePosition not in self.minePositions:
self.minePositions.append(minePosition)
self.matrix[minePosition[0]][minePosition[1]] = 'M'
if self.checks_if_there_are_accumulated_mines_in_the_matrix(self.matrix):
break
def checks_if_there_are_accumulated_mines_in_the_matrix(self, matrix):
for x in range(self.numberOfRows):
for y in range(self.numberOfColumns):
numberOfMines = 0
numberOfNeighbors = 0
for neighborPosition in self.neighbors:
try:
xN, yN = neighborPosition(x, y)
if xN < 0 or yN < 0:
raise IndexError
numberOfNeighbors += 1
if self.matrix[xN][yN] == 'M':
numberOfMines += 1
except IndexError:
pass
if numberOfNeighbors == numberOfMines:
return False
return True
def put_number_in_the_matrix(self):
for x, y in self.minePositions:
for positionNeighbor in self.neighbors:
try:
xN, yN = positionNeighbor(x, y)
if xN < 0 or yN < 0:
raise IndexError
if self.matrix[xN][yN] != 'M':
self.matrix[xN][yN] += 1
except IndexError:
pass
def main(self):
self.creates_the_matrix()
self.put_mines_in_the_matrix()
self.put_number_in_the_matrix()
return self.matrix
class Minesweeper:
def __init__(self, window, matrix):
self.matrix = matrix
self.x = len(self.matrix)
self.y = len(self.matrix[0])
self.window = window
self.flags = []
self.mines = []
self.neighbors = NEIGHBORS
self.matrixButtons = [[y for y in range(self.y)] for x in range(self.x)]
self.game_creator()
self.window.resizable(0, 0)
self.window.title('Minesweeper')
self.window.mainloop()
def game_creator(self):
if self.x > 25:
size = 15
self.window.geometry(f"{self.y * size}x{self.x * size}")
self.images('big')
else:
size = 21
self.window.geometry(f"{self.y * size}x{self.x * size}")
self.images('small')
for x in range(self.x):
for y in range(self.y):
pos = [x, y]
label = Label(self.window, borderwidth=1, relief='groove', bg='darkgrey')
self.matrixButtons[x][y] = Button(self.window, image = self.bgButton)
self.matrixButtons[x][y].bind("<Button-3>", partial(self.right_click, self.matrixButtons[x][y]))
if self.matrix[x][y] == 'M':
self.mines.append(self.matrixButtons[x][y])
self.matrixButtons[x][y].config(command = partial(self.game_over, self.matrixButtons[x][y], label))
label.config(image = self.mine)
else:
self.matrixButtons[x][y].config(command = partial(self.left_click, self.matrixButtons[x][y], pos))
self.put_pictures(x, y, label)
label.place(x= y*size, y = x*size)
self.matrixButtons[x][y].place(x= y*size, y = x*size)
def put_pictures(self, x, y, label):
if self.matrix[x][y] == 0: label.config(image = self.zero)
if self.matrix[x][y] == 1: label.config(image = self.one)
if self.matrix[x][y] == 2: label.config(image = self.two)
if self.matrix[x][y] == 3: label.config(image = self.three)
if self.matrix[x][y] == 4: label.config(image = self.four)
if self.matrix[x][y] == 5: label.config(image = self.five)
if self.matrix[x][y] == 6: label.config(image = self.six)
if self.matrix[x][y] == 7: label.config(image = self.seven)
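        # note: a cell bordered by 8 mines is not handled above (no "eight" image is loaded in images())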
def images(self, gameSize):
if gameSize == 'big':
self.zero = PhotoImage(file = "images/bigGame/zero.png")
self.one = PhotoImage(file = "images/bigGame/one.png")
self.two = PhotoImage(file = "images/bigGame/two.png")
self.three = PhotoImage(file = "images/bigGame/three.png")
self.four = PhotoImage(file = "images/bigGame/four.png")
self.five = PhotoImage(file = "images/bigGame/five.png")
self.six = PhotoImage(file = "images/bigGame/six.png")
self.seven = PhotoImage(file = "images/bigGame/seven.png")
self.mine = PhotoImage(file = "images/bigGame/mine.png")
self.explosion= PhotoImage(file = "images/bigGame/explosion.png")
self.flag = PhotoImage(file = "images/bigGame/flag.png")
self.bgButton = PhotoImage(file = "images/bigGame/backgroundButton.png")
if gameSize == 'small':
self.zero = PhotoImage(file = "images/smallGame/zero.png")
self.one = PhotoImage(file = "images/smallGame/one.png")
self.two = PhotoImage(file = "images/smallGame/two.png")
self.three = PhotoImage(file = "images/smallGame/three.png")
self.four = PhotoImage(file = "images/smallGame/four.png")
self.five = PhotoImage(file = "images/smallGame/five.png")
self.six = PhotoImage(file = "images/smallGame/six.png")
self.seven = PhotoImage(file = "images/smallGame/seven.png")
self.mine = PhotoImage(file = "images/smallGame/mine.png")
self.explosion= PhotoImage(file = "images/smallGame/explosion.png")
self.flag = PhotoImage(file = "images/smallGame/flag.png")
self.bgButton = PhotoImage(file = "images/smallGame/backgroundButton.png")
def left_click(self, button, pos):
x, y = pos
self.deletedButtons = []
button.destroy()
self.deletedButtons.append(button)
if self.matrix[x][y] == 0:
self.delete_blank_buttons(x, y)
def delete_blank_buttons(self, x, y):
for func in self.neighbors:
try:
xN, yN = func(x, y)
if xN < 0 or yN < 0:
raise IndexError
if self.matrix[xN][yN] != 'M':
if self.matrixButtons[xN][yN] not in self.deletedButtons:
if self.matrixButtons[xN][yN] not in self.flags:
self.matrixButtons[xN][yN].destroy()
self.deletedButtons.append(self.matrixButtons[xN][yN])
if self.matrix[xN][yN] == 0:
self.delete_blank_buttons(xN, yN)
except IndexError:
pass
def right_click(self, button, event):
if button['state'] == 'normal':
self.flags.append(button)
button.config(image = self.flag)
button['state'] = 'disabled'
self.victory()
else:
self.flags.remove(button)
button.config(image = self.bgButton)
button['state'] = 'normal'
self.victory()
def victory(self):
for button in self.mines:
if button not in self.flags:
return
if len(self.flags) != len(self.mines):
return
showinfo("You win!", "You win!")
self.window.destroy()
def game_over(self, button, label):
button.destroy()
label.config(image = self.explosion)
showinfo("Game Over!", "Game Over")
self.window.destroy()
if __name__ == '__main__':
while True:
rows = int(input("Type number of rows: "))
columns = int(input("Type number of columns: "))
mines = int(input("Type number of mines: "))
window = Tk()
matrix = Matrix(rows, columns, mines).main()
Minesweeper(window, matrix)
r = str(input("Continue? ")).upper()
if r[0] == 'N':
break
| 30.925234 | 119 | 0.512642 |
4a1b0f518917525eb00db9f6a88103b2567315a8
| 1,327 |
py
|
Python
|
goodline_iptv/xmltv.py
|
nsadovskiy/goodline_tv
|
6f745ae05a22031a36cb5cedc6b627cbf7ba6512
|
[
"MIT"
] | 1 |
2016-03-27T08:57:35.000Z
|
2016-03-27T08:57:35.000Z
|
goodline_iptv/xmltv.py
|
nsadovskiy/goodline_tv
|
6f745ae05a22031a36cb5cedc6b627cbf7ba6512
|
[
"MIT"
] | null | null | null |
goodline_iptv/xmltv.py
|
nsadovskiy/goodline_tv
|
6f745ae05a22031a36cb5cedc6b627cbf7ba6512
|
[
"MIT"
] | null | null | null |
from xml.dom import minidom
from aiofiles import open as open_file
from xml.etree.ElementTree import Element, SubElement, tostring
class XmltvBuilder(object):
def __init__(self, timezone='+0700'):
self.timezone = timezone
self.root = Element('tv')
self.root.set('generator-info-name', 'Preved')
self.root.set('generator-info-url', 'http://www.medved.info')
def add_channel(self, epg_id, name, icon):
channel = SubElement(self.root, 'channel', id=epg_id)
name_element = SubElement(channel, 'display-name')
name_element.text = name
icon_element = SubElement(channel, 'icon')
icon_element.text = icon
def add_track(self, epg, time_begin, time_end, name):
programme = SubElement(self.root, 'programme',
start=f'{time_begin.strftime("%Y%m%d%H%M%S")} {self.timezone}',
stop=f'{time_end.strftime("%Y%m%d%H%M%S")} {self.timezone}',
channel=f'{epg}'
)
title = SubElement(programme, 'title', lang='ru')
title.text = name
async def save(self, path):
async with open_file(path, mode='w') as f:
await f.write(self.to_string())
def to_string(self):
return minidom.parseString(tostring(self.root, encoding='unicode')).toprettyxml(indent=' ')
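# Illustrative usage sketch (not part of the original module); the channel id,
# name and icon URL below are made-up values.
if __name__ == '__main__':
    example = XmltvBuilder(timezone='+0700')
    example.add_channel('ch-1', 'First Channel', 'http://example.org/icon.png')
    print(example.to_string())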
| 32.365854 | 99 | 0.632253 |
4a1b116b628eb5ca05179824f1b5cc53db4e7f88
| 1,802 |
py
|
Python
|
Source/Tests/index.py
|
JuanDiegoMontoya/2D_Game_Engine
|
b2b026de9d5e0953331cb5a4db55bb6cacf5b55e
|
[
"MIT"
] | 3 |
2021-07-12T08:06:13.000Z
|
2021-12-22T15:03:09.000Z
|
Source/Tests/index.py
|
JuanDiegoMontoya/3D_Voxel_Engine
|
b2b026de9d5e0953331cb5a4db55bb6cacf5b55e
|
[
"MIT"
] | null | null | null |
Source/Tests/index.py
|
JuanDiegoMontoya/3D_Voxel_Engine
|
b2b026de9d5e0953331cb5a4db55bb6cacf5b55e
|
[
"MIT"
] | 2 |
2021-02-07T04:20:51.000Z
|
2021-07-12T08:06:14.000Z
|
# some coordinate tests (not unit tests)
import operator as op
from functools import reduce
SIZE = 32
def ID3D(x, y, z, h, w):
return (x + h * (y + w * z))
def TestID3D():
for z in range(SIZE):
for x in range(SIZE):
for y in range(SIZE):
print(ID3D(x, y, z, SIZE, SIZE))
def TestBitAnd():
print(1)
def ncr(n, r):
if (r > n):
return 0
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer / denom
def nCr(n, k):
if (k > n):
return 0
sum = 1;
for i in range(1, k + 1):
        sum = sum * ((n + 1 - i) / i)  # note: '/' keeps this a float; use '//' for integer division
return sum
def Bernstein(n, v, t):
return nCr(n, v) * pow(t, v) * pow(1 - t, n - v);
def DeCasteljau(t):
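    # note: 'Degree' is not defined in this module; it must be set globally before
    # calling this function (only the commented-out test in main() references it)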
sum = 0;
for i in range(Degree + 1):
sum = sum + nCr(Degree, i) * Bernstein(Degree, i, t);
return sum;
WIDTH = 10
HEIGHT = 11
DEPTH = 12
# "1"D to 3D coord
def ID23D(index):
k = index // (WIDTH * HEIGHT)
j = (index % (WIDTH * HEIGHT)) // WIDTH
i = index - j * WIDTH - k * WIDTH * HEIGHT
return int(i),int(j),int(k)
def main():
#for x in range(-256, 256):
#print(x & SIZE)
"""# test n choose k function w/ "confirmed" correct one
for n in range(0, 5):
for r in range (0, 5):
if (ncr(n, r) != nCr(n, r)):
print(n, r, "incorrect")
#"""
"""# test DeCasteljau function for various t
for t in range(20):
print(DeCasteljau())
#"""
#"""# test flatten function (ID23D)
# assume 10*10*10 grid
for x in range(WIDTH):
for y in range(HEIGHT):
for z in range(DEPTH):
index = x + y * WIDTH + z * WIDTH * HEIGHT
i, j, k = ID23D(index)
print(x, y, z)
print(i, j, k)
print((x,y,z) == (i,j,k))
print()
#"""
if __name__ == '__main__':
main()
| 21.710843 | 62 | 0.54495 |
4a1b12414bcc56bf3d31c02cc2c8fad0676ad374
| 161,315 |
py
|
Python
|
pandas/io/pytables.py
|
JimStearns206/pandas
|
ceaf85233d434a226b23f891465b4abfdc602e46
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/io/pytables.py
|
JimStearns206/pandas
|
ceaf85233d434a226b23f891465b4abfdc602e46
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/io/pytables.py
|
JimStearns206/pandas
|
ceaf85233d434a226b23f891465b4abfdc602e46
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime, date
import time
import re
import copy
import itertools
import warnings
import os
from pandas.core.dtypes.common import (
is_list_like,
is_categorical_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_datetime64_dtype,
_ensure_object,
_ensure_int64,
_ensure_platform_int)
from pandas.core.dtypes.missing import array_equivalent
import numpy as np
from pandas import (Series, DataFrame, Panel, Panel4D, Index,
MultiIndex, Int64Index, isnull, concat,
SparseSeries, SparseDataFrame, PeriodIndex,
DatetimeIndex, TimedeltaIndex)
from pandas.core import config
from pandas.io.common import _stringify_path
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.base import StringMixin
from pandas.io.formats.printing import adjoin, pprint_thing
from pandas.errors import PerformanceWarning
from pandas.core.common import _asarray_tuplesafe
from pandas.core.algorithms import match, unique
from pandas.core.categorical import Categorical, _factorize_from_iterables
from pandas.core.internals import (BlockManager, make_block,
_block2d_to_blocknd,
_factor_indexer, _block_shape)
from pandas.core.index import _ensure_index
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
from pandas.core.config import get_option
from pandas.core.computation.pytables import Expr, maybe_expression
from pandas._libs import tslib, algos, lib
from distutils.version import LooseVersion
# versioning attribute
_version = '0.15.2'
# encoding
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""Ensure that an index / column name is a str (python 3) or
unicode (python 2); otherwise they may be np.string dtype.
Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, compat.string_types):
name = compat.text_type(name)
return name
Term = Expr
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
    # only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {
u('f'): 'fixed',
u('fixed'): 'fixed',
u('t'): 'table',
u('table'): 'table',
}
format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""
# map object types
_TYPE_MAP = {
Series: u('series'),
SparseSeries: u('sparse_series'),
DataFrame: u('frame'),
SparseDataFrame: u('sparse_frame'),
Panel: u('wide'),
Panel4D: u('ndim'),
}
# storer class map
_STORER_MAP = {
u('Series'): 'LegacySeriesFixed',
u('DataFrame'): 'LegacyFrameFixed',
u('DataMatrix'): 'LegacyFrameFixed',
u('series'): 'SeriesFixed',
u('sparse_series'): 'SparseSeriesFixed',
u('frame'): 'FrameFixed',
u('sparse_frame'): 'SparseFrameFixed',
u('wide'): 'PanelFixed',
}
# table class map
_TABLE_MAP = {
u('generic_table'): 'GenericTable',
u('appendable_series'): 'AppendableSeriesTable',
u('appendable_multiseries'): 'AppendableMultiSeriesTable',
u('appendable_frame'): 'AppendableFrameTable',
u('appendable_multiframe'): 'AppendableMultiFrameTable',
u('appendable_panel'): 'AppendablePanelTable',
u('appendable_ndim'): 'AppendableNDimTable',
u('worm'): 'WORMTable',
u('legacy_frame'): 'LegacyFrameTable',
u('legacy_panel'): 'LegacyPanelTable',
}
# axes map
_AXES_MAP = {
DataFrame: [0],
Panel: [1, 2],
Panel4D: [1, 2, 3],
}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', False, dropna_doc,
validator=config.is_bool)
config.register_option(
'default_format', None, format_doc,
validator=config.is_one_of_factory(['fixed', 'table', None])
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
if LooseVersion(tables.__version__) < '3.0.0':
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == 'strict')
except:
pass
return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda store: store.put(key, value, **kwargs)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, string_types):
with HDFStore(path_or_buf, mode=mode, complevel=complevel,
complib=complib) as store:
f(store)
else:
f(path_or_buf)
def read_hdf(path_or_buf, key=None, **kwargs):
""" read from the store, close it if we opened it
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : path (string), buffer, or path object (pathlib.Path or
py._path.local.LocalPath) to read from
.. versionadded:: 0.19.0 support for pathlib, py.path.
key : group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
    where : list of Term (or convertible) objects, optional
start : optional, integer (defaults to None), row number to start
selection
stop : optional, integer (defaults to None), row number to stop
selection
columns : optional, a list of columns that if not None, will limit the
return columns
iterator : optional, boolean, return an iterator, default False
chunksize : optional, nrows to include in iteration, return an iterator
Returns
-------
The selected object
"""
if kwargs.get('mode', 'a') not in ['r', 'r+', 'a']:
raise ValueError('mode {0} is not allowed while performing a read. '
'Allowed modes are r, r+ and a.'
.format(kwargs.get('mode')))
# grab the scope
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError('The HDFStore must be open for reading.')
store = path_or_buf
auto_close = False
else:
path_or_buf = _stringify_path(path_or_buf)
if not isinstance(path_or_buf, string_types):
raise NotImplementedError('Support for generic buffers has not '
'been implemented.')
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise compat.FileNotFoundError(
'File %s does not exist' % path_or_buf)
store = HDFStore(path_or_buf, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError('No dataset in HDF5 file.')
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError('key must be provided when HDF5 file '
'contains multiple datasets.')
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
except:
# if there is an error, close the store
try:
store.close()
except:
pass
raise
def _is_metadata_of(group, parent_group):
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == 'meta':
return True
current = current._v_parent
return False
class HDFStore(StringMixin):
"""
dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default 0
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc', None}, default None
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> from pandas import DataFrame
>>> from numpy.random import randn
>>> bar = DataFrame(randn(10, 4))
>>> store = HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
try:
import tables # noqa
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem '
'importing'.format(ex=str(ex)))
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
"complib only supports {libs} compression.".format(
libs=tables.filters.all_complibs))
self._path = _stringify_path(path)
if mode is None:
mode = 'a'
self._mode = mode
self._handle = None
self._complevel = complevel
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
try:
return self.get(name)
except:
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __contains__(self, key):
""" check for existance of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append("[invalid_HDFStore node: %s]"
% pprint_thing(detail))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
        have the leading '/')
"""
return [n._v_pathname for n in self.groups()]
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complib is not None:
if self._complevel is None:
self._complevel = 9
self._filters = _tables().Filters(self._complevel,
self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError(
"PyTables [{version}] no longer supports opening multiple "
"files\n"
"even in read-only mode on this HDF5 version "
"[{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
"which allows\n"
"files to be opened multiple times at once\n"
.format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
            # trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : type of object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns, **kwargs)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
"object [%s] is not a table, and cannot be used in all "
"select as multiple" % t.pathname
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
        # axis is the concatenation axis
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [t.read(where=_where, columns=columns, start=_start,
stop=_stop, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result(coordinates=True)
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
data_columns : list of columns to create as data columns, or True to
use all columns. See
`here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
if s is None:
raise KeyError('No object named %s in the file' % key)
# remove the node
if where is None and start is None and stop is None:
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
format: 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum string sizes
        nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex_axis(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_nodes()
if (getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u('table')))
]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
return None
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [%s] [group->%s,"
"value->%s,format->%s,append->%s,kwargs->%s]"
% (t, group, type(value), format, append, kwargs)
)
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u('_table')
# a storer node
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
            # if we are a writer, determine the tt
if value is not None:
if pt == u('series_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
elif index.nlevels > 1:
tt = u('appendable_multiframe')
elif pt == u('wide_table'):
tt = u('appendable_panel')
elif pt == u('ndim_table'):
tt = u('appendable_ndim')
else:
                # distinguish between a frame/table
tt = u('legacy_panel')
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
except:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
        # we don't want to store a table node at all if our object is 0-len
        # as there are no dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
def get_store(path, **kwargs):
""" Backwards compatible alias for ``HDFStore``
"""
warnings.warn(
"get_store is deprecated and be "
"removed in a future version\n"
"HDFStore(path, **kwargs) is the replacement",
FutureWarning,
stacklevel=6)
return HDFStore(path, **kwargs)
class TableIterator(object):
""" define the iteration interface on a table
Parameters
----------
store : the reference store
    s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : boolean, whether to use the default iterator
chunksize : the passed chunking value (default is 50000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
"""
def __init__(self, store, s, func, where, nrows, start=None, stop=None,
iterator=False, chunksize=None, auto_close=False):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates=False):
# return the actual iterator
if self.chunksize is not None:
if not self.s.is_table:
raise TypeError(
"can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
        # if specified read via coordinates (necessary for multiple selections)
if coordinates:
where = self.s.read_coordinates(where=self.where, start=self.start,
stop=self.stop)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol(StringMixin):
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None,
itemsize=None, name=None, axis=None, kind_attr=None,
pos=None, freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
self.itemsize = itemsize
self.name = name
self.cname = cname
self.kind_attr = kind_attr
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.table = None
self.meta = None
self.metadata = None
if name is not None:
self.set_name(name, kind_attr)
if pos is not None:
self.set_pos(pos)
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "%s_kind" % name
if self.cname is None:
self.cname = name
return self
def set_axis(self, axis):
""" set the axis over which I index """
self.axis = axis
return self
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self
def set_table(self, table):
self.table = table
return self
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.axis,
self.pos,
self.kind)))
return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'axis', 'pos']])
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except:
            return False
def copy(self):
new_self = copy.copy(self)
return new_self
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_metadata(handler)
return new_self
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values = _maybe_convert(values, self.kind, encoding)
kwargs = dict()
if self.freq is not None:
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
try:
self.values = Index(values, **kwargs)
except:
# if the output freq is different than what we recorded,
# it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
kwargs['freq'] = None
self.values = Index(values, **kwargs)
self.values = _set_tz(self.values, self.tz)
return self
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
min_itemsize can be an integer or a dict mapping this column's name
to an integer size """
if _ensure_decoded(self.kind) == u('string'):
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables(
).StringCol(itemsize=min_itemsize, pos=self.pos)
def validate(self, handler, append, **kwargs):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler, append, **kwargs):
self.set_table(handler.table)
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == u('string'):
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [%s] in [%s] "
"column but\nthis column has a limit of [%s]!\n"
"Consider using min_itemsize to preset the sizes on "
"these columns" % (itemsize, self.cname, c.itemsize))
return c.itemsize
return None
def validate_attr(self, append):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError("incompatible kind in col [%s - %s]" %
(existing_kind, self.kind))
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = _get_info(info, self.name)
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
"invalid info for [%s] for [%s], existing_value [%s] "
"conflicts with new value [%s]"
% (self.name, key, existing_value, value))
else:
if value is not None or existing_value is not None:
idx[key] = value
return self
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this colummn """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this colummn """
setattr(self.attrs, self.kind_attr, self.kind)
def read_metadata(self, handler):
""" retrieve the metadata for this columns """
self.metadata = handler.read_metadata(self.cname)
def validate_metadata(self, handler):
""" validate that kind=category does not change the categories """
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if new_metadata is not None and cur_metadata is not None \
and not array_equivalent(new_metadata, cur_metadata):
raise ValueError("cannot append a categorical with "
"different categories to the existing")
def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
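# Note: IndexCol round-trips a pandas Index through a single PyTables column.
# convert() rebuilds the Index from the stored values, restoring freq/tz/name
# from attributes saved at write time, while validate_col() guards against
# string truncation on append and update_info() reconciles per-axis metadata.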
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self):
return False
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
return self
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ['tz', 'ordered']
@classmethod
def create_for_block(
cls, i=None, name=None, cname=None, version=None, **kwargs):
""" return a new datacol with the block i """
if cname is None:
cname = name or 'values_block_%d' % i
if name is None:
name = cname
# prior to 0.10.1, we named values blocks like: values_block_0 and the
# name values_0
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search("values_block_(\d+)", name)
if m:
name = "values_%s" % m.groups()[0]
except:
pass
return cls(name=name, cname=cname, **kwargs)
def __init__(self, values=None, kind=None, typ=None,
cname=None, data=None, meta=None, metadata=None,
block=None, **kwargs):
super(DataCol, self).__init__(values=values, kind=kind, typ=typ,
cname=cname, **kwargs)
self.dtype = None
self.dtype_attr = u("%s_dtype" % self.name)
self.meta = meta
self.meta_attr = u("%s_meta" % self.name)
self.set_data(data)
self.set_metadata(metadata)
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.dtype,
self.kind,
self.shape)))
return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos']])
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
if dtype is not None:
self.dtype = dtype
self.set_kind()
elif self.dtype is None:
self.dtype = data.dtype.name
self.set_kind()
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata, copy=False).ravel()
self.metadata = metadata
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
self.kind = 'string'
elif dtype.startswith(u('float')):
self.kind = 'float'
elif dtype.startswith(u('complex')):
self.kind = 'complex'
elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
self.kind = 'integer'
elif dtype.startswith(u('date')):
self.kind = 'datetime'
elif dtype.startswith(u('timedelta')):
self.kind = 'timedelta'
elif dtype.startswith(u('bool')):
self.kind = 'bool'
else:
raise AssertionError(
"cannot interpret dtype of [%s] in [%s]" % (dtype, self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, **kwargs):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items,
info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after 8260
# this would only be hit for a multi-timezone dtype
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
# this is basically a catchall; if, say, a datetime64 has nans then it will
# end up here ###
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding)
# set as a data block
else:
self.set_atom_data(block)
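# Note: set_atom is the dtype dispatcher for a block being written.
# Categorical, datetime64 (tz-aware or naive), timedelta64 and complex blocks
# are short-cut to dedicated setters; date, multi-timezone datetime and
# unicode data raise TypeError; object/string blocks go through
# set_atom_string, and everything else falls through to set_atom_data.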
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel())
if inferred_type != 'string':
# we cannot serialize this data, so report an exception on a column
# by column basis
for i, item in enumerate(block_items):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
raise TypeError(
"Cannot serialize the column [%s] because\n"
"its data contents are [%s] object dtype"
% (item, inferred_type)
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(
self.name) or min_itemsize.get('values') or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))
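# Illustrative usage sketch of the itemsize negotiation above (file, key and
# column names are hypothetical): reserving extra width for a string column so
# later, longer appends do not overflow the fixed-width StringCol:
#
#   df.to_hdf('store.h5', 'df', format='table', append=True,
#             min_itemsize={'strcol': 50})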
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
if kind is None:
kind = self.kind
if self.kind.startswith('uint'):
col_name = "UInt%sCol" % kind[4:]
else:
col_name = "%sCol" % kind.capitalize()
return getattr(_tables(), col_name)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
def set_atom_complex(self, block):
self.kind = block.dtype.name
itemsize = int(self.kind.split('complex')[-1]) // 8
self.typ = _tables().ComplexCol(
itemsize=itemsize, shape=block.shape[0])
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_categorical(self, block, items, info=None, values=None):
# currently only supports a 1-D categorical
# in a 1-D block
values = block.values
codes = values.codes
self.kind = 'integer'
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
if len(items) > 1:
raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
self.set_data(_block_shape(codes))
# write the categories
self.meta = 'category'
self.set_metadata(block.values.categories)
# update the info
self.update_info(info)
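# Note: categoricals are stored as their integer codes in the data column,
# with the categories written separately as metadata (meta='category');
# DataCol.convert reassembles the Categorical from codes + categories on read.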
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_datetime64(self, block, values=None):
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'datetime64')
def set_atom_datetime64tz(self, block, info, values=None):
if values is None:
values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
# store a converted timezone
self.tz = _get_tz(block.values.tz)
self.update_info(info)
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
self.set_data(values, 'datetime64')
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_timedelta64(self, block, values=None):
self.kind = 'timedelta64'
self.typ = self.get_atom_timedelta64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'timedelta64')
@property
def shape(self):
return getattr(self.data, 'shape', None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
raise ValueError("appended items dtype do not match existing "
"items dtype in table!")
def convert(self, values, nan_rep, encoding):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
self.set_data(values)
# use the meta if needed
meta = _ensure_decoded(self.meta)
# convert to the correct dtype
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == u('datetime64'):
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
elif dtype == u('timedelta64'):
self.data = np.asarray(self.data, dtype='m8[ns]')
elif dtype == u('date'):
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
except ValueError:
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
elif dtype == u('datetime'):
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
elif meta == u('category'):
# we have a categorical
categories = self.metadata
codes = self.data.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
mask = isnull(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum().values
self.data = Categorical.from_codes(codes,
categories=categories,
ordered=self.ordered)
else:
try:
self.data = self.data.astype(dtype, copy=False)
except:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == u('string'):
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding)
return self
def get_attr(self):
""" get the data for this colummn """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
""" set the data for this colummn """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
raise ValueError("cannot have non-object label DataIndexableCol")
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)()
def get_atom_datetime64(self, block):
return _tables().Int64Col()
def get_atom_timedelta64(self, block):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed(StringMixin):
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : my parent HDFStore
group : the group node where the table resides
"""
pandas_kind = None
obj_type = None
ndim = None
is_table = False
def __init__(self, parent, group, encoding=None, **kwargs):
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.set_version()
@property
def is_old_version(self):
return (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1)
def set_version(self):
""" compute and set our version """
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple([int(x) for x in version.split('.')])
if len(self.version) == 2:
self.version = self.version + (0,)
except:
self.version = (0, 0, 0)
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs,
'pandas_type', None))
@property
def format_type(self):
return 'fixed'
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
s = "[%s]" % ','.join([pprint_thing(x) for x in s])
return "%-12.12s (shape->%s)" % (self.pandas_type, s)
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version()
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self):
return self.parent._complevel
@property
def _fletcher32(self):
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self):
return False
@property
def nrows(self):
return getattr(self.storable, 'nrows', None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(self, **kwargs):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement")
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: sublcasses should implement")
def delete(self, where=None, start=None, stop=None, **kwargs):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if where is None and start is None and stop is None:
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = dict([(v, k)
for k, v in compat.iteritems(_index_type_map)])
attributes = []
# indexer helpers
def _class_to_alias(self, cls):
return self._index_type_map.get(cls, '')
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
return DatetimeIndex._simple_new(values, None, freq=freq,
tz=tz)
return f
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
return PeriodIndex._simple_new(values, None, freq=freq)
return f
return klass
def validate_read(self, kwargs):
"""
remove table keywords from kwargs and return
raise if any keywords are passed which are not-None
"""
kwargs = copy.copy(kwargs)
columns = kwargs.pop('columns', None)
if columns is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
where = kwargs.pop('where', None)
if where is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
return kwargs
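# Illustrative consequence of validate_read above (file/key names are
# hypothetical): fixed-format stores can only be read in their entirety, so
#
#   pd.read_hdf('store.h5', 'df')                  # works
#   pd.read_hdf('store.h5', 'df', where='A > 0')   # raises TypeError
#
# write with format='table' if where/columns selection is needed.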
@property
def is_exists(self):
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
data = node[start:stop]
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = data[0]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = data
if dtype == u('datetime64'):
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == u('timedelta64'):
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret
def read_index(self, key, **kwargs):
variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
if variety == u('multi'):
return self.read_multi_index(key, **kwargs)
elif variety == u('block'):
return self.read_block_index(key, **kwargs)
elif variety == u('sparseint'):
return self.read_sparse_intindex(key, **kwargs)
elif variety == u('regular'):
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
raise TypeError('unrecognized index variety: %s' % variety)
def write_index(self, key, index):
if isinstance(index, MultiIndex):
setattr(self.attrs, '%s_variety' % key, 'multi')
self.write_multi_index(key, index)
elif isinstance(index, BlockIndex):
setattr(self.attrs, '%s_variety' % key, 'block')
self.write_block_index(key, index)
elif isinstance(index, IntIndex):
setattr(self.attrs, '%s_variety' % key, 'sparseint')
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
converted = _convert_index(index, self.encoding,
self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if hasattr(index, 'freq'):
node._v_attrs.freq = index.freq
if hasattr(index, 'tz') and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_block_index(self, key, index):
self.write_array('%s_blocs' % key, index.blocs)
self.write_array('%s_blengths' % key, index.blengths)
setattr(self.attrs, '%s_length' % key, index.length)
def read_block_index(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
blocs = self.read_array('%s_blocs' % key, **kwargs)
blengths = self.read_array('%s_blengths' % key, **kwargs)
return BlockIndex(length, blocs, blengths)
def write_sparse_intindex(self, key, index):
self.write_array('%s_indices' % key, index.indices)
setattr(self.attrs, '%s_length' % key, index.length)
def read_sparse_intindex(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
indices = self.read_array('%s_indices' % key, **kwargs)
return IntIndex(length, indices)
def write_multi_index(self, key, index):
setattr(self.attrs, '%s_nlevels' % key, index.nlevels)
for i, (lev, lab, name) in enumerate(zip(index.levels,
index.labels,
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
conv_level = _convert_index(lev, self.encoding,
self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, '%s_name%d' % (key, i), name)
# write the labels
label_key = '%s_label%d' % (key, i)
self.write_array(label_key, lab)
def read_multi_index(self, key, **kwargs):
nlevels = getattr(self.attrs, '%s_nlevels' % key)
levels = []
labels = []
names = []
for i in range(nlevels):
level_key = '%s_level%d' % (key, i)
name, lev = self.read_index_node(getattr(self.group, level_key),
**kwargs)
levels.append(lev)
names.append(name)
label_key = '%s_label%d' % (key, i)
lab = self.read_array(label_key, **kwargs)
labels.append(lab)
return MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=True)
def read_index_node(self, node, start=None, stop=None):
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we replace it with the original.
if ('shape' in node._v_attrs and
self._is_empty_array(getattr(node._v_attrs, 'shape'))):
data = np.empty(getattr(node._v_attrs, 'shape'),
dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
index_class = self._alias_to_class(getattr(node._v_attrs,
'index_class', ''))
factory = self._get_index_factory(index_class)
kwargs = {}
if u('freq') in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
if u('tz') in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
if kind in (u('date'), u('datetime')):
index = factory(_unconvert_index(data, kind,
encoding=self.encoding),
dtype=object, **kwargs)
else:
index = factory(_unconvert_index(data, kind,
encoding=self.encoding), **kwargs)
index.name = name
return name, index
def write_array_empty(self, key, value):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
def _is_empty_array(self, shape):
"""Returns true if any axis is zero length."""
return any(x == 0 for x in shape)
def write_array(self, key, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
transposed = False
if is_categorical_dtype(value):
raise NotImplementedError('Cannot store a category dtype in '
'a HDF5 dataset that uses format='
'"fixed". Use format="table".')
if not empty_array:
value = value.T
transposed = True
if self._filters is not None:
atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
getattr(self.group, key)._v_attrs.transposed = transposed
else:
self.write_array_empty(key, value)
return
if value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel())
if empty_array:
pass
elif inferred_type == 'string':
pass
else:
try:
items = list(items)
except:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
if empty_array:
self.write_array_empty(key, value)
else:
if is_datetime64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key,
value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = 'datetime64'
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
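# Note: write_array chooses one of three layouts: a compressed chunked CArray
# when filters are active and an Atom can be derived from the dtype, a VLArray
# of pickled objects for object-dtype data (with a PerformanceWarning for
# non-string contents), or a plain Array otherwise; datetime64/timedelta64
# values are stored as int64 together with a value_type marker.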
class LegacyFixed(GenericFixed):
def read_index_legacy(self, key, start=None, stop=None):
node = getattr(self.group, key)
data = node[start:stop]
kind = node._v_attrs.kind
return _unconvert_index_legacy(data, kind, encoding=self.encoding)
class LegacySeriesFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
class LegacyFrameFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
return DataFrame(values, index=index, columns=columns)
class SeriesFixed(GenericFixed):
pandas_kind = u('series')
attributes = ['name']
@property
def shape(self):
try:
return len(getattr(self.group, 'values')),
except:
return None
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index', **kwargs)
values = self.read_array('values', **kwargs)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super(SeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseFixed(GenericFixed):
def validate_read(self, kwargs):
"""
we don't support start, stop kwds in Sparse
"""
kwargs = super(SparseFixed, self).validate_read(kwargs)
if 'start' in kwargs or 'stop' in kwargs:
raise NotImplementedError("start and/or stop are not supported "
"in fixed Sparse reading")
return kwargs
class SparseSeriesFixed(SparseFixed):
pandas_kind = u('sparse_series')
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
kind=self.kind or u('block'),
fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
super(SparseSeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
self.attrs.name = obj.name
self.attrs.fill_value = obj.fill_value
self.attrs.kind = obj.kind
class SparseFrameFixed(SparseFixed):
pandas_kind = u('sparse_frame')
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
key = 'sparse_series_%s' % c
s = SparseSeriesFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[c] = s.read()
return SparseDataFrame(sdict, columns=columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
for name, ss in compat.iteritems(obj):
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns)
class BlockManagerFixed(GenericFixed):
attributes = ['ndim', 'nblocks']
is_shape_reversed = False
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, 'block%d_items' % i)
shape = getattr(node, 'shape', None)
if shape is not None:
items += shape[0]
# data shape
node = getattr(self.group, 'block0_values')
shape = getattr(node, 'shape', None)
if shape is not None:
shape = list(shape[0:(ndim - 1)])
else:
shape = []
shape.append(items)
# hacky - this works for frames, but is reversed for panels
if self.is_shape_reversed:
shape = shape[::-1]
return shape
except:
return None
def read(self, start=None, stop=None, **kwargs):
# start, stop applied to rows, so 0th axis only
kwargs = self.validate_read(kwargs)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index('axis%d' % i, start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
blocks = []
for i in range(self.nblocks):
blk_items = self.read_index('block%d_items' % i)
values = self.read_array('block%d_values' % i,
start=_start, stop=_stop)
blk = make_block(values,
placement=items.get_indexer(blk_items))
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
super(BlockManagerFixed, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError(
"Columns index has to be unique for fixed format")
self.write_index('axis%d' % i, ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array('block%d_values' % i, blk.values, items=blk_items)
self.write_index('block%d_items' % i, blk_items)
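# Note: a fixed-format frame is laid out as one node per axis ('axis0',
# 'axis1', ...) plus, for each consolidated block, a 'block<i>_items' index
# node and a 'block<i>_values' array node; read() reassembles these into a
# BlockManager.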
class FrameFixed(BlockManagerFixed):
pandas_kind = u('frame')
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
pandas_kind = u('wide')
obj_type = Panel
is_shape_reversed = True
def write(self, obj, **kwargs):
obj._consolidate_inplace()
return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u('wide_table')
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
def __init__(self, *args, **kwargs):
super(Table, self).__init__(*args, **kwargs)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
self.selection = None
@property
def table_type_short(self):
return self.table_type.split('_')[0]
@property
def format_type(self):
return 'table'
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
dc = ",dc->[%s]" % ','.join(
self.data_columns) if len(self.data_columns) else ''
ver = ''
if self.is_old_version:
ver = "[%s]" % '.'.join([str(x) for x in self.version])
return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
self.pandas_type, ver, self.table_type_short, self.nrows,
self.ncols, ','.join([a.name for a in self.index_axes]), dc
)
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError("incompatible table_type with existing [%s - %s]" %
(other.table_type, self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [%s] on appending data [%s] "
"vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
"invalid combinate of [%s] on appending data [%s] vs "
"current table [%s]" % (c, sv, ov))
@property
def is_multi_index(self):
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [
c.name for c in self.values_axes if c.metadata is not None]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
""" has this table been created """
return u('table') in self.group
@property
def storable(self):
return getattr(self.group, 'table', None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
""" the number of total columns in the values axes """
return sum([len(a.values) for a in self.values_axes])
@property
def is_transposed(self):
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
)
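# Illustrative use of queryables above (key/column names are hypothetical):
# only index axes and data_columns may appear in a 'where' expression, e.g.
#
#   store.append('df', df, data_columns=['A'])
#   store.select('df', where='index > 5 & A > 0')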
def index_cols(self):
""" return a list of my index cols """
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key)
def write_metadata(self, key, values):
"""
write out a meta data array to the key as a fixed-format Series
Parameters
----------
key : string
values : ndarray
"""
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
if getattr(getattr(self.group, 'meta', None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_info(self):
""" update our table index info """
self.attrs.info = self.info
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.set_info()
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(
self.attrs, 'non_index_axes', None) or []
self.data_columns = getattr(
self.attrs, 'data_columns', None) or []
self.info = getattr(
self.attrs, 'info', None) or dict()
self.nan_rep = getattr(self.attrs, 'nan_rep', None)
self.encoding = _ensure_encoding(
getattr(self.attrs, 'encoding', None))
self.levels = getattr(
self.attrs, 'levels', None) or []
self.index_axes = [
a.infer(self) for a in self.indexables if a.is_an_indexable
]
self.values_axes = [
a.infer(self) for a in self.indexables if not a.is_an_indexable
]
self.metadata = getattr(
self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [%s] which is not an axis or "
"data_column" % k)
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
if self._indexables is None:
self._indexables = []
# index columns
self._indexables.extend([
IndexCol(name=name, axis=axis, pos=i)
for i, (axis, name) in enumerate(self.attrs.index_cols)
])
# values columns
dc = set(self.data_columns)
base_pos = len(self._indexables)
def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
return klass.create_for_block(i=i, name=c, pos=base_pos + i,
version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return self._indexables
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError(
'Columns containing complex values can be stored '
'but cannot'
' be indexed when using table format. Either use '
'fixed format, set index=False, or do not include '
'the columns containing complex values to '
'data_columns when initializing the table.')
v.create_index(**kw)
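# Illustrative usage sketch of create_index above (key/column names are
# hypothetical): HDFStore.create_table_index forwards here, e.g.
#
#   store.create_table_index('df', columns=['A'], optlevel=9, kind='full')
#
# columns holding complex values cannot be indexed and raise TypeError.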
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
for success
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(self, where=where, **kwargs)
values = self.selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)
return True
def get_object(self, obj):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
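# Illustrative effect of validate_data_columns above (file, key and column
# names are hypothetical): data_columns=True makes every column queryable,
# while a list restricts this to the named columns, e.g.
#
#   df.to_hdf('store.h5', 'df', format='table', data_columns=['A', 'B'])
#   pd.read_hdf('store.h5', 'df', where='B == "foo"')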
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
legacy tables create an indexable column, indexable index,
non-indexable fields
Parameters:
-----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a value to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except:
raise TypeError("cannot properly create the storer for: "
"[group->%s,value->%s]"
% (self.group._v_name, type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.info = copy.copy(existing_table.info)
else:
existing_table = None
# currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.format_type
).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis),
np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(np.array(sorted(append_axis)),
np.array(sorted(exist_axis))):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex_axis([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = dict([(tuple(b_items.tolist()), (b, b_items))
for b, b_items in zip(blocks, blk_items)])
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ','.join(pprint_thing(item) for
item in items))
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if (data_columns and len(b_items) == 1 and
b_items[0] in data_columns):
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except:
raise ValueError("Incompatible appended table [%s] with "
"existing table [%s]"
% (blocks, existing_table.values_axes))
else:
existing_col = None
try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b, block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
info=self.info,
**kwargs)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s"
% (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
# this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = _ensure_index(getattr(obj, field).values)
filt = _ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError(
"cannot find the field [%s] for filtering!" % field)
obj = process_filter(field, filt)
return obj
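# Note: process_axes reorders the result to the stored (non-index) axis layout
# and then applies any filter terms recorded by the Selection against the
# in-memory object, matching each filter field to an axis name or to a value
# within an axis.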
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
# use the provided expectedrows if it is passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = dict([(a.cname, a.typ) for a in self.axes])
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
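# Illustrative usage sketch of read_coordinates above (key/column names are
# hypothetical): coordinates can be computed once and reused, which is what
# HDFStore.select_as_coordinates exposes:
#
#   coords = store.select_as_coordinates('df', 'A > 0')
#   subset = store.select('df', where=coords)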
def read_column(self, column, where=None, start=None, stop=None, **kwargs):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [%s] can not be extracted individually; it is "
"not data indexable" % column)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding
).take_data(),
a.tz, True), name=column)
raise KeyError("column [%s] not found in the table" % column)
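# Illustrative usage sketch of read_column above (key/column names are
# hypothetical): HDFStore.select_column forwards here and works only for
# indexables and data_columns:
#
#   store.select_column('df', 'index')   # the stored index as a Series
#   store.select_column('df', 'A')       # only if 'A' is a data column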
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
table. writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = u('worm')
def read(self, **kwargs):
""" read the indicies and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORKTable needs to implement write")
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
(possibly) already existing appendable table. this table ALLOWS
append (but doesn't require them), and stores the data in a format
that can be easily searched
"""
_indexables = [
IndexCol(name='index', axis=1, pos=0),
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
table_type = u('legacy')
ndim = 3
def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
lst_vals = [a.values for a in self.index_axes]
labels, levels = _factorize_from_iterables(lst_vals)
# labels and levels are tuples but lists are expected
labels = list(labels)
levels = list(levels)
N = [len(lvl) for lvl in levels]
# compute the key
key = _factor_indexer(N[1:], labels)
objs = []
if len(unique(key)) == len(key):
sorter, _ = algos.groupsort_indexer(
_ensure_int64(key), np.prod(N))
sorter = _ensure_platform_int(sorter)
# create the objs
for c in self.values_axes:
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
if sorted_values.ndim == 1:
sorted_values = sorted_values.reshape(
(sorted_values.shape[0], 1))
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
block = _block2d_to_blocknd(
values=sorted_values, placement=np.arange(len(items)),
shape=tuple(N), labels=take_labels, ref_items=items)
# create the object
mgr = BlockManager([block], [items] + levels)
obj = self.obj_type(mgr)
# permute if needed
if self.is_transposed:
obj = obj.transpose(
*tuple(Series(self.data_orientation).argsort()))
objs.append(obj)
else:
warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
[i.values for i in self.index_axes])
for c in self.values_axes:
lp = DataFrame(c.data, index=long_index, columns=c.values)
# need a better algorithm
tuple_index = long_index.values
unique_tuples = lib.fast_unique(tuple_index)
unique_tuples = _asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = _ensure_platform_int(indexer)
new_index = long_index.take(indexer)
new_values = lp.values.take(indexer, axis=0)
lp = DataFrame(new_values, index=new_index, columns=lp.columns)
objs.append(lp.to_panel())
# create the composite object
if len(objs) == 1:
wp = objs[0]
else:
wp = concat(objs, axis=0, verify_integrity=False)._consolidate()
# apply the selection filters & axis orderings
wp = self.process_axes(wp, columns=columns)
return wp
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
pandas_kind = u('frame_table')
table_type = u('legacy_frame')
obj_type = Panel
def read(self, *args, **kwargs):
return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
table_type = u('legacy_panel')
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
table_type = u('appendable')
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
chunksize=None, expectedrows=None, dropna=False, **kwargs):
if not append and self.is_exists:
self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
min_itemsize=min_itemsize,
**kwargs)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows)
# set the table attributes
self.set_attrs()
# create the table
self._handle.create_table(self.group, **options)
else:
pass
# table = self.table
# update my info
self.set_info()
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isnull(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
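        # tile/repeat each index so that, taken together, the broadcast index arrays
        # enumerate the cartesian product of axis labels in row order, matching the
        # flattened values written below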
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
try:
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
except Exception as detail:
raise Exception("cannot create row-data -> %s" % detail)
try:
if len(rows):
self.table.append(rows)
self.table.flush()
except Exception as detail:
raise TypeError("tables cannot write this data -> %s" % detail)
def delete(self, where=None, start=None, stop=None, **kwargs):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
self.selection = Selection(
self, where, start=start, stop=stop, **kwargs)
values = self.selection.select_coords()
# delete the rows in reverse order
l = Series(values).sort_values()
ln = len(l)
if ln:
# construct groups of consecutive rows
diff = l.diff()
groups = list(diff[diff > 1].index)
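            # positions where the sorted row numbers jump by more than 1 mark the
            # start of a new block of consecutive rows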
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = l.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
self.table.flush()
# return the number of rows removed
return ln
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
pandas_kind = u('frame_table')
table_type = u('appendable_frame')
ndim = 2
obj_type = DataFrame
@property
def is_transposed(self):
return self.index_axes[0].axis == 1
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.T
return obj
def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
info = (self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
            # _ensure_index doesn't recognize our list-of-tuples here
if info.get('type') == 'MultiIndex':
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get('names')
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, 'name', None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, 'name', None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
# apply the selection filters & axis orderings
df = self.process_axes(df, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_series')
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self):
return False
def get_object(self, obj):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(self, columns=None, **kwargs):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_multiseries')
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u('frame_table')
table_type = u('generic_table')
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self):
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, 'table', None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a.infer(self)
for a in self.indexables if a.is_an_indexable]
self.values_axes = [a.infer(self)
for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@property
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
            # the index column is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = u('appendable_multiframe')
obj_type = DataFrame
ndim = 2
    _re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self):
return u('appendable_multi')
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super(AppendableMultiFrameTable, self).write(
obj=obj, data_columns=data_columns, **kwargs)
def read(self, **kwargs):
df = super(AppendableMultiFrameTable, self).read(**kwargs)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names([
None if self._re_levels.search(l) else l for l in df.index.names
])
return df
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
table_type = u('appendable_panel')
ndim = 3
obj_type = Panel
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.transpose(*self.data_orientation)
return obj
@property
def is_transposed(self):
return self.data_orientation != tuple(range(self.ndim))
class AppendableNDimTable(AppendablePanelTable):
""" suppor the new appendable table formats """
table_type = u('appendable_ndim')
ndim = 4
obj_type = Panel4D
def _reindex_axis(obj, axis, labels, other=None):
ax = obj._get_axis(axis)
labels = _ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = _ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = _ensure_index(labels.unique())
if other is not None:
labels = _ensure_index(other.unique()) & labels
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
    except KeyError:
idx = info[name] = dict()
return idx
# tz to/from coercion
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = tslib.get_timezone(tz)
if zone is None:
zone = tslib.tot_seconds(tz.utcoffset())
return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if tz is not None:
name = getattr(values, 'name', None)
values = values.ravel()
tz = tslib.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
if values.tz is None:
values = values.tz_localize('UTC').tz_convert(tz)
if preserve_UTC:
if tz == 'UTC':
values = list(values)
elif coerce:
values = np.asarray(values, dtype='M8[ns]')
return values
def _convert_index(index, encoding=None, format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, TimedeltaIndex):
converted = index.asi8
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
        # avoid storing an ndarray of Period objects
return IndexCol(index._values, 'integer', atom,
freq=getattr(index, 'freq', None),
index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
inferred_type = lib.infer_dtype(index)
values = np.asarray(index)
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'timedelta64':
converted = values.view('i8')
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.asarray([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return IndexCol(converted, 'datetime', _tables().Time64Col(),
index_name=index_name)
elif inferred_type == 'date':
converted = np.asarray([v.toordinal() for v in values],
dtype=np.int32)
return IndexCol(converted, 'date', _tables().Time32Col(),
index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(
converted, 'string', _tables().StringCol(itemsize),
itemsize=itemsize, index_name=index_name
)
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
"[unicode] is not supported as a in index type for [{0}] formats"
.format(format_type)
)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
def _unconvert_index(data, kind, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime64'):
index = DatetimeIndex(data)
elif kind == u('timedelta64'):
index = TimedeltaIndex(data)
elif kind == u('datetime'):
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind == u('date'):
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
        except ValueError:
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
elif kind in (u('integer'), u('float')):
index = np.asarray(data)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
elif kind == u('object'):
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime'):
index = lib.time64_to_datetime(data)
elif kind in (u('integer')):
index = np.asarray(data, dtype=object)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _convert_string_array(data, encoding, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = Series(data.ravel()).str.encode(
encoding).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
itemsize = lib.max_len_string_array(_ensure_object(data.ravel()))
data = np.asarray(data, dtype="S%d" % itemsize)
return data
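# rough example (illustrative, assuming utf-8 input):
#   _convert_string_array(np.array(['a', 'bb'], dtype=object), encoding='utf-8')
#   -> array([b'a', b'bb'], dtype='|S2')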
def _unconvert_string_array(data, nan_rep=None, encoding=None):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding in PY3 (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = lib.max_len_string_array(_ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
if isinstance(data[0], compat.binary_type):
data = Series(data).str.decode(encoding).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = 'nan'
data = lib.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
def _maybe_convert(values, val_kind, encoding):
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
def _get_converter(kind, encoding):
kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
return lib.convert_timestamps
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
kind = _ensure_decoded(kind)
if kind in (u('datetime'), u('datetime64'), u('string')):
return True
return False
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
    where : list of Terms (or convertible to)
    start, stop: indices to start and/or stop selection
"""
def __init__(self, table, where=None, start=None, stop=None, **kwargs):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where)
if inferred == 'integer' or inferred == 'boolean':
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if ((self.start is not None and
(where < self.start).any()) or
(self.stop is not None and
(where >= self.stop).any())):
raise ValueError(
"where must have index locations >= start and "
"< stop"
)
self.coordinates = where
            except Exception:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
except NameError:
# raise a nice message, suggesting that the user should use
# data_columns
raise ValueError(
"The passed where expression: {0}\n"
" contains an invalid variable reference\n"
" all of the variable refrences must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
" The currently defined references are: {1}\n"
.format(where, ','.join(q.keys()))
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(self.condition.format(),
start=self.start,
stop=self.stop)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
if fn is None:
fn = 'timeit.h5'
store = HDFStore(fn, mode='w')
store.append(key, df, **kwargs)
store.close()
if remove:
os.remove(fn)
| 33.903951 | 104 | 0.555088 |
4a1b128fa7c0383970b67145023352fde31f964f
| 246 |
py
|
Python
|
test/tiles/npvector_test.py
|
liz-is/clodius
|
7e4a0d5f646a9af625c97e07859d914e72c83f0e
|
[
"MIT"
] | null | null | null |
test/tiles/npvector_test.py
|
liz-is/clodius
|
7e4a0d5f646a9af625c97e07859d914e72c83f0e
|
[
"MIT"
] | 1 |
2019-03-28T20:08:01.000Z
|
2019-03-28T20:08:01.000Z
|
test/tiles/npvector_test.py
|
4dn-dcic/clodius
|
aa31b3d90a5a9fec883c20cab31ad4d347cd52cd
|
[
"MIT"
] | null | null | null |
import numpy as np
import clodius.tiles.npvector as hgnv
def test_npvector():
array = np.array(range(100))
# print('ts:', hgnv.tileset_info(array))
assert('max_width' in hgnv.tileset_info(array))
tile = hgnv.tiles(array, 0, 0)
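    # the tiles() call above is a smoke test: it only checks that tile generation
    # does not raise; the returned tile contents are not asserted on here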
| 22.363636 | 51 | 0.686992 |
4a1b130dcb42bcbdeabcd4be51db950239cc2e9a
| 187 |
py
|
Python
|
deepspeed/module_inject/__init__.py
|
samyam/DeepSpeed
|
22aca3694ad4f01d13813b9630caac8683bb22e9
|
[
"MIT"
] | null | null | null |
deepspeed/module_inject/__init__.py
|
samyam/DeepSpeed
|
22aca3694ad4f01d13813b9630caac8683bb22e9
|
[
"MIT"
] | null | null | null |
deepspeed/module_inject/__init__.py
|
samyam/DeepSpeed
|
22aca3694ad4f01d13813b9630caac8683bb22e9
|
[
"MIT"
] | null | null | null |
from .replace_module import replace_transformer_layer
from .module_quantize import quantize_transformer_layer
from .replace_policy import DSPolicy, HFBertLayerPolicy, MegatronLayerPolicy
| 46.75 | 76 | 0.898396 |
4a1b132a3c59cc1a84aeea9103440ad4c817f490
| 2,411 |
py
|
Python
|
mmdet/cv_core/utils/coco_creator.py
|
Karybdis/mmdetection-mini
|
cf255db8037cfeecb7927268d018432f5771ab2e
|
[
"MIT"
] | 834 |
2020-09-08T08:00:04.000Z
|
2022-03-29T01:25:55.000Z
|
mmdet/cv_core/utils/coco_creator.py
|
mgnfcnt-ec/mmdet
|
46647fba610dfb396569d7910058c707d67d4ec4
|
[
"Apache-2.0"
] | 44 |
2020-09-16T14:21:58.000Z
|
2022-03-07T04:28:39.000Z
|
mmdet/cv_core/utils/coco_creator.py
|
mgnfcnt-ec/mmdet
|
46647fba610dfb396569d7910058c707d67d4ec4
|
[
"Apache-2.0"
] | 161 |
2020-09-10T03:26:35.000Z
|
2022-03-22T11:25:04.000Z
|
import datetime
import json
import os
from .pycococreatortools import create_image_info, create_annotation_info
class CocoCreator(object):
def __init__(self, categories, year=2020, out_dir='./', save_name='temp.json'):
INFO = {
"description": "Dataset",
"url": "https://github.com/hhaAndroid/mmdetection-mini",
"version": "1.0.0",
"year": year,
"contributor": "hha",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
LICENSES = [
{
"id": 1,
"name": "Attribution-NonCommercial-ShareAlike License",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}
]
self.check_categories(categories)
self.coco_output = {
"info": INFO,
"licenses": LICENSES,
"categories": categories,
"images": [],
"annotations": []
}
self.out_dir = out_dir
self.save_name = save_name
def check_categories(self, categories):
"""
example:
[
{
                'id': 1, # category 1
'name': 'power',
'supercategory': 'object',
}
{
                'id': 2, # category 2
'name': 'circle',
'supercategory': 'shape',
}
]
"""
assert isinstance(categories, list)
assert isinstance(categories[0], dict)
def create_image_info(self, image_id, file_name, image_size):
image_info = create_image_info(image_id, file_name, image_size)
self.coco_output["images"].append(image_info)
def create_annotation_info(self, segmentation_id, image_id, category_info, binary_mask=None, bounding_box=None,
image_size=None, tolerance=2):
annotation_info = create_annotation_info(segmentation_id, image_id, category_info, binary_mask, image_size,
tolerance,
bounding_box)
if annotation_info is not None:
self.coco_output["annotations"].append(annotation_info)
def dump(self):
out_file = os.path.join(self.out_dir, 'annotations', self.save_name)
with open(out_file, 'w') as output_json_file:
json.dump(self.coco_output, output_json_file)
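# minimal usage sketch (file names and category list are illustrative only; the
# annotations/ directory under out_dir must already exist):
#   creator = CocoCreator([{'id': 1, 'name': 'power', 'supercategory': 'object'}],
#                         out_dir='./', save_name='temp.json')
#   creator.create_image_info(1, 'img_0001.jpg', (640, 480))
#   creator.dump()  # writes ./annotations/temp.json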
| 33.486111 | 115 | 0.546246 |
4a1b135ed6a6f15fe07a5f53af2739f8e8ddb5b4
| 181 |
py
|
Python
|
automate.py
|
ismaelsadeeq/automate-
|
b0ad3cf055bab70340cb7973a3a4b0e25d8e3389
|
[
"MIT"
] | null | null | null |
automate.py
|
ismaelsadeeq/automate-
|
b0ad3cf055bab70340cb7973a3a4b0e25d8e3389
|
[
"MIT"
] | null | null | null |
automate.py
|
ismaelsadeeq/automate-
|
b0ad3cf055bab70340cb7973a3a4b0e25d8e3389
|
[
"MIT"
] | null | null | null |
import pyautogui
import time
text = input("Enter the text you want to send")
i=0
while True:
time.sleep(2)
pyautogui.typewrite(text)
time.sleep(1)
pyautogui.press('enter')
| 16.454545 | 47 | 0.723757 |
4a1b13d1f570b83f38c360f22dbbe09c7ab748e3
| 6,527 |
py
|
Python
|
minetest/mods/discordmt/server.py
|
ShadyFox2004/minetest-server
|
1310fd2a8c0bee9e9e718257ed136040e34d213e
|
[
"MIT"
] | 1 |
2021-06-30T20:50:19.000Z
|
2021-06-30T20:50:19.000Z
|
minetest/mods/discordmt/server.py
|
ShadyFox2004/minetest-server
|
1310fd2a8c0bee9e9e718257ed136040e34d213e
|
[
"MIT"
] | null | null | null |
minetest/mods/discordmt/server.py
|
ShadyFox2004/minetest-server
|
1310fd2a8c0bee9e9e718257ed136040e34d213e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
from aiohttp import web
import aiohttp
import discord
from discord.ext import commands
import asyncio
import json
import time
import configparser
import re
config = configparser.ConfigParser()
config.read('relay.conf')
class Queue():
def __init__(self):
self.queue = []
def add(self, item):
self.queue.append(item)
def get(self):
if len(self.queue) >=1:
item = self.queue[0]
del self.queue[0]
return item
else:
return None
def get_all(self):
items = self.queue
self.queue = []
return items
def isEmpty(self):
return len(self.queue) == 0
def clean_invites(string):
return ' '.join([word for word in string.split() if not ('discord.gg' in word) and not ('discordapp.com/invite' in word)])
outgoing_msgs = Queue()
command_queue = Queue()
login_queue = Queue()
prefix = config['BOT']['command_prefix']
bot = commands.Bot(command_prefix=prefix)
channel_id = int(config['RELAY']['channel_id'])
connected = False
port = int(config['RELAY']['port'])
token = config['BOT']['token']
logins_allowed = True if config['RELAY']['allow_logins'] == 'true' else False
do_clean_invites = True if config['RELAY']['clean_invites'] == 'true' else False
do_use_nicknames = True if config['RELAY']['use_nicknames'] == 'true' else False
last_request = 0
channel = None
authenticated_users = {}
def check_timeout():
return time.time() - last_request <= 1
async def get_or_fetch_user(user_id):
user = bot.get_user(user_id)
if user is None:
user = await bot.fetch_user(user_id)
if user is None:
print(f'Failed to fetch user {user_id!r}.')
return user
async def handle(request):
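    # HTTP endpoint polled by the Minetest mod: it accepts relay messages and login
    # results pushed from the game, and replies with any queued chat messages,
    # commands and login attempts encoded as JSON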
global last_request
last_request = time.time()
text = await request.text()
try:
data = json.loads(text)
if data['type'] == 'DISCORD-RELAY-MESSAGE':
msg = discord.utils.escape_mentions(data['content'])[0:2000]
r = re.compile(r'\x1b(T|F|E|\(T@[^\)]*\))')
msg = r.sub('', msg)
if 'context' in data.keys():
id = int(data['context'])
user = await get_or_fetch_user(id)
if user is not None:
await user.send(msg)
else:
await channel.send(msg)
return web.Response(text = 'Acknowledged') # discord.send should NOT block extensively on the Lua side
if data['type'] == 'DISCORD_LOGIN_RESULT':
user_id = int(data['user_id'])
user = await get_or_fetch_user(user_id)
if user is not None:
if data['success'] is True:
authenticated_users[user_id] = data['username']
await user.send('Login successful.')
else:
await user.send('Login failed.')
    except Exception:
pass
response = json.dumps({
'messages' : outgoing_msgs.get_all(),
'commands' : command_queue.get_all(),
'logins' : login_queue.get_all()
})
return web.Response(text = response)
app = web.Application()
app.add_routes([web.get('/', handle),
web.post('/', handle)])
@bot.event
async def on_ready():
global connected
if not connected:
connected = True
global channel
channel = await bot.fetch_channel(channel_id)
@bot.event
async def on_message(message):
global outgoing_msgs
if check_timeout():
if (message.channel.id == channel_id) and (message.author.id != bot.user.id):
msg = {
'author': message.author.name if not do_use_nicknames else message.author.display_name,
'content': message.content.replace('\n', '/')
}
if do_clean_invites:
msg['content'] = clean_invites(msg['content'])
if msg['content'] != '':
outgoing_msgs.add(msg)
await bot.process_commands(message)
@bot.command(help='Runs an ingame command from Discord.')
async def cmd(ctx, command, *, args=''):
if not check_timeout():
await ctx.send("The server currently appears to be down.")
return
if ((ctx.channel.id != channel_id) and ctx.guild is not None) or not logins_allowed:
return
if ctx.author.id not in authenticated_users.keys():
await ctx.send('Not logged in.')
return
command = {
'name': authenticated_users[ctx.author.id],
'command': command,
'params': args.replace('\n', '')
}
if ctx.guild is None:
command['context'] = str(ctx.author.id)
command_queue.add(command)
@bot.command(help='Logs into your ingame account from Discord so you can run commands. You should only run this command in DMs with the bot.')
async def login(ctx, username, password=''):
if not logins_allowed:
return
if ctx.guild is not None:
await ctx.send(ctx.author.mention+' You\'ve quite possibly just leaked your password; it is advised that you change it at once.\n*This message will be automatically deleted*', delete_after = 10)
return
login_queue.add({
'username' : username,
'password' : password,
'user_id' : str(ctx.author.id)
})
if not check_timeout():
await ctx.send("The server currently appears to be down, but your login attempt has been added to the queue and will be executed as soon as the server returns.")
@bot.command(help='Lists connected players and server information.')
async def status(ctx, *, args=None):
if not check_timeout():
await ctx.send("The server currently appears to be down.")
return
if ((ctx.channel.id != channel_id) and ctx.guild is not None):
return
data = {
'name': 'discord_relay',
'command': 'status',
'params': '',
}
if ctx.guild is None:
data['context'] = str(ctx.author.id)
command_queue.add(data)
async def runServer():
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, 'localhost', port)
await site.start()
async def runBot():
await bot.login(token)
await bot.connect()
try:
print('='*37+'\nStarting relay. Press Ctrl-C to exit.\n'+'='*37)
loop = asyncio.get_event_loop()
futures = asyncio.gather(runBot(), runServer())
loop.run_until_complete(futures)
except (KeyboardInterrupt, SystemExit):
sys.exit()
| 31.531401 | 202 | 0.613452 |
4a1b1402b2afd8a02a9a80b11c173cb2aba193e7
| 7,567 |
py
|
Python
|
run.py
|
Manasseh-Kinyua/passLocker
|
09589628bbcda14cb59b1dc56a76eaf768a53760
|
[
"MIT"
] | null | null | null |
run.py
|
Manasseh-Kinyua/passLocker
|
09589628bbcda14cb59b1dc56a76eaf768a53760
|
[
"MIT"
] | null | null | null |
run.py
|
Manasseh-Kinyua/passLocker
|
09589628bbcda14cb59b1dc56a76eaf768a53760
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.9
from user import User
from user import Credentials
def logo():
print(" ____ _ _ ")
print(" | _ \ | | | | /\ ")
print(" | |_) ) ____ ___ ___ | | _____ _ _ | | / / ____ _ _ ")
print(" | __/ / _ |/ __ / __ | | / _ \| '_|| |/ / / __ \ | _| ")
print(" | | / (_| |\__ \ \__ \ | |___ ( (_) ) |_ | | \ \ | ___/ | | ")
print(" |_| \_____| ___/ ___/ |_____) \_____/|____| | \_\ \____ |_| ")
logo()
def create_new_user(username, password):
'''
Function to create a new user
'''
new_user = User(username, password)
return new_user
def save_users(user):
'''
Function to save new users.
'''
user.save_user()
def display_user():
'''
Function to display users in the user list.
'''
    return User.display_users()
def login_user(username, password):
'''
Function that checks if a user exists, and logs them in.
'''
check_user = Credentials.verify_user(username, password)
return check_user
def create_new_credential(account, userName, password):
'''
    Function to create new credentials for a user.
'''
new_credential = Credentials(account, userName, password)
return new_credential
def save_credentials(credentials):
'''
Function that saves user credentials to the credentials list.
'''
credentials.save_credential()
def display_credential():
'''
Function to display the saved user credentials.
'''
return Credentials.display_credentials()
def del_credential(credentials):
'''
    Function to delete a credentials object.
'''
credentials.delete_credential()
def find_credential(account):
'''
Function that checks if a credential object is in the credentials class.
'''
return Credentials.find_by_account(account)
def generate_password():
'''
    Function that generates an automatic password for the user if they choose that option.
'''
generated_pass = Credentials.generatePassword()
return generated_pass
def copy_password(account):
'''
Function that copies a user's password to the clipboard.
'''
return Credentials.copy_password(account)
def main():
print("Hello, Welocome to Pass Locker. We are a store for your Accounts Passwords.....\n Please ENTER one of the following short codes to proceed.\n 'cr' -> Create a New Account. \n 'h1' -> Already have an Account. \n")
short_code = input().lower().strip()
if short_code == 'cr':
print("Please Sign Up")
print('*' * 50)
username = input("Enter your username: ")
while True:
print("Please USE one of the followig to enter your own password or to have one selected for you.\n 'em' -> To enter own password.\n 'ap' -> To have an automatic password generated for you.\n")
choice = input().lower().strip()
if choice == 'em':
password = input("Please enter your password; ")
break
elif choice == 'ap':
password = generate_password()
else:
print("Invalid passwordchoice......Please try again with a correct choice.")
save_users(create_new_user(username, password))
print("8" * 85)
print(f"Hello {username}, You have created an account successfully.\n Your password id : {password}")
print("8" * 85)
elif short_code == 'h1':
print("*" * 50)
print("Please ENTER username and password to login...")
print("*" * 50)
username = input("Enter username:")
password = input("Enter password:")
        login = login_user(username, password)
        if login:
print(f"Hello {username}, Welcome to Pass Locker")
print('\n')
while True:
print("Please use the following short codes.\n crc -> To creatte new credentials.\n dc -> To display credentials.\n fc -> To find credentials.\n gp -> To generate new credentials.\n d -> To delete credentials. \n ex -> To exit the application.\n")
short_code = input().lower().strip()
if short_code == 'crc':
print("Create new credentials")
print("." * 20)
print("Enter Account Name....")
account = input().lower()
print("Enter the account username")
userName = input()
while True:
print("Choose: em -> to enter your own password.\n ap -> To have a password generated for you.\n")
choice = input().lower().strip()
if choice == 'em':
password = input("Please enter your password: \n")
break
elif choice == 'ap':
password = generate_password()
break
else:
print("Invalid choice....PLease try again with the correct choice.")
save_credentials(create_new_credential(account, userName, password))
print('\n')
print(f"Credentials for {account} have been created")
print('\n')
elif short_code == 'dc':
if display_credential():
print("Here is a list of your credentials:")
print("*" * 30)
print("_" * 30)
for credential in display_credential():
print(f"Account: {credential.account}\n UserName: {credential.userName}\n password: {credential.password}")
print("_" * 30)
print("*" * 30)
else:
print("You do not have aby credentials saved yet.....")
elif short_code == 'fc':
print("Please enter the account you want to find")
search_acc = input().lower()
if find_credential(search_acc):
search_credential = find_credential(search_acc)
print(f"Account: {search_credential.account}")
print("-" * 50)
print(f"userName: {search_credential.userName}\n Password: {search_credential.password}")
print("-" * 50)
else:
print("The name you entered is not an account name in available credentials")
print('\n')
elif short_code == 'd':
print("Enter the Account for the credential you want to delete")
search_acc = input().lower()
if find_credential(search_acc):
search_credential = find_credential(search_acc)
                    search_credential.delete_credential()
print('\n')
print(f"{search_credential.account} has been successfully deleted!")
print('\n')
else:
print("The name you entered does not match any in our list")
elif short_code == 'gp':
password = generate_password()
print(f"{password} Generated successfully. Proceed to use it.")
elif short_code == 'ex':
print("Thank you for using Pass Locker....See you another time...BYE.....")
break
else:
print("Wrong entry....PLease use the short codes provided")
else:
print("Please always enter a valid input in order to use the application.")
if __name__ == '__main__':
main()
| 38.217172 | 255 | 0.556231 |
4a1b14534d4793e52e8e56632f5eb8427f4f2865
| 4,374 |
py
|
Python
|
contrib/seeds/generate-seeds.py
|
3mhuggu5hss/privat
|
5f715576bf02497ac207f682dce7c3fb94682feb
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
3mhuggu5hss/privat
|
5f715576bf02497ac207f682dce7c3fb94682feb
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
3mhuggu5hss/privat
|
5f715576bf02497ac207f682dce7c3fb94682feb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
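# e.g. name_to_ipv6('1.2.3.4') returns the 16-byte IPv4-mapped address ::ffff:1.2.3.4,
# while a .onion name is decoded into the OnionCat-prefixed IPv6 range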
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef PRIVAT_CHAINPARAMSSEEDS_H\n')
g.write('#define PRIVAT_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the privat network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // PRIVAT_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.467626 | 98 | 0.582076 |
4a1b1550b262083f76b6182c347ceb7a949e58d0
| 737 |
py
|
Python
|
predictPixelMovement.py
|
danieldrysn/opticalflow_backgroundestimation
|
fd73feb8473072e78a2b4fcf9da856c3e5e4f940
|
[
"BSD-3-Clause"
] | null | null | null |
predictPixelMovement.py
|
danieldrysn/opticalflow_backgroundestimation
|
fd73feb8473072e78a2b4fcf9da856c3e5e4f940
|
[
"BSD-3-Clause"
] | null | null | null |
predictPixelMovement.py
|
danieldrysn/opticalflow_backgroundestimation
|
fd73feb8473072e78a2b4fcf9da856c3e5e4f940
|
[
"BSD-3-Clause"
] | null | null | null |
# Predict Pixel Movement
# Daniel D. Doyle
# 2021-11-10
'''
Predict pixel movement using image rows and columns, focal length, pan, and tilt angles
'''
import math
# Returns predicted point based on size of the image, previous point, focal length, psi, and theta
def predictPixelMovement( rows, cols, previousPoint, foc, psi, theta ):
xo = previousPoint[0] - cols/2
yo = previousPoint[1] - rows/2
xn = foc*((xo - foc*math.tan(psi))/(xo*math.tan(psi)*math.cos(theta)-yo*(math.sin(theta)/math.cos(psi))+foc*math.cos(theta)))
yn = foc*((xo*math.sin(psi)*math.tan(theta)+yo+foc*math.cos(psi)*math.tan(theta))/(xo*math.sin(psi)-yo*math.tan(theta)+foc*math.cos(psi)))
return (int(xn + cols/2),int(yn + rows/2))
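# Illustrative call (values are made up): for a 480x640 image, a previous point of
# (320, 240), a focal length of 800 px and small pan/tilt angles in radians:
#   predictPixelMovement(480, 640, (320, 240), 800.0, 0.01, 0.005)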
| 38.789474 | 140 | 0.682497 |
4a1b15f9a70d87a992c2c88e96244f41b5ee7a9a
| 1,340 |
py
|
Python
|
cpl/math/prime_factorization_query.py
|
yaumu3/cpl-python3
|
903d73195ecf606af41fb22fb74282aaca63d33a
|
[
"MIT"
] | 1 |
2021-03-05T15:48:23.000Z
|
2021-03-05T15:48:23.000Z
|
cpl/math/prime_factorization_query.py
|
yaumu3/cpl-python3
|
903d73195ecf606af41fb22fb74282aaca63d33a
|
[
"MIT"
] | null | null | null |
cpl/math/prime_factorization_query.py
|
yaumu3/cpl-python3
|
903d73195ecf606af41fb22fb74282aaca63d33a
|
[
"MIT"
] | null | null | null |
from typing import List
class SmallestPrimeFactors:
"""High speed prime factorizaton using smallest prime factors
Precompute the smallest prime factor for each integer less than or equal to `n`
by Eratosthenes sieve; tiem complexity O(n*log(log(n))).
Then, factorize integer `x` with time complexity O(log(x))
Based on `https://atcoder.jp/contests/abc177/editorial/82`.
Args:
n: Max number to factorize or judge primality.
Attributes:
spf: List of the smallest prime factor whose index is the corresponding integer.
"""
def __init__(self, n: int) -> None:
self.spf = list(range(n + 1))
self.spf[0] = -1
self.spf[1] = -1
for i in range(2, int(n ** 0.5) + 1):
if self.spf[i] == i:
for j in range(i * i, n + 1, i):
if self.spf[j] == j:
self.spf[j] = i
def is_prime(self, x: int) -> bool:
assert x < len(self.spf)
return self.spf[x] == x
def factor(self, x: int) -> List[int]:
assert x < len(self.spf)
if x < 2:
return []
res = []
while True:
if self.is_prime(x):
res.append(x)
break
res.append(self.spf[x])
x //= self.spf[x]
return res
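# quick usage sketch:
#   spf = SmallestPrimeFactors(100)
#   spf.is_prime(97)   # True
#   spf.factor(84)     # [2, 2, 3, 7]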
| 30.454545 | 88 | 0.535075 |
4a1b16f231b4739be1ffe9fc298e3644ca0ce81c
| 391 |
py
|
Python
|
Sritter/asgi.py
|
lllapukk/Sritter
|
12eda47b56a0533dc6405dd5d37f903968ed624d
|
[
"MIT"
] | null | null | null |
Sritter/asgi.py
|
lllapukk/Sritter
|
12eda47b56a0533dc6405dd5d37f903968ed624d
|
[
"MIT"
] | null | null | null |
Sritter/asgi.py
|
lllapukk/Sritter
|
12eda47b56a0533dc6405dd5d37f903968ed624d
|
[
"MIT"
] | null | null | null |
"""
ASGI config for Sritter project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Sritter.settings')
application = get_asgi_application()
| 23 | 78 | 0.785166 |
4a1b16fdb0555e75119407ace00db7b9739e4490
| 6,615 |
py
|
Python
|
mindspore/nn/metrics/mean_surface_distance.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | 1 |
2021-07-03T06:52:20.000Z
|
2021-07-03T06:52:20.000Z
|
mindspore/nn/metrics/mean_surface_distance.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
mindspore/nn/metrics/mean_surface_distance.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MeanSurfaceDistance."""
from scipy.ndimage import morphology
import numpy as np
from mindspore._checkparam import Validator as validator
from .metric import Metric, rearrange_inputs
class MeanSurfaceDistance(Metric):
"""
This function is used to compute the Average Surface Distance from `y_pred` to `y` under the default setting.
    Mean Surface Distance (MSD): the mean of the distance vector is taken. This tells us how much, on average, the
    surface varies between the segmentation and the ground truth (GT).
Args:
distance_metric (string): The parameter of calculating Hausdorff distance supports three measurement methods,
"euclidean", "chessboard" or "taxicab". Default: "euclidean".
        symmetric (bool): whether to calculate the symmetric average surface distance between `y_pred` and `y`.
            If ``symmetric = True``, the average symmetric surface distance between these two inputs
            will be returned. Default: False.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import nn, Tensor
>>>
>>> x = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
>>> y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))
>>> metric = nn.MeanSurfaceDistance(symmetric=False, distance_metric="euclidean")
>>> metric.clear()
>>> metric.update(x, y, 0)
>>> mean_average_distance = metric.eval()
>>> print(mean_average_distance)
0.8047378541243649
"""
def __init__(self, symmetric=False, distance_metric="euclidean"):
super(MeanSurfaceDistance, self).__init__()
self.distance_metric_list = ["euclidean", "chessboard", "taxicab"]
distance_metric = validator.check_value_type("distance_metric", distance_metric, [str])
self.distance_metric = validator.check_string(distance_metric, self.distance_metric_list, "distance_metric")
self.symmetric = validator.check_value_type("symmetric", symmetric, [bool])
self.clear()
def clear(self):
"""Clears the internal evaluation result."""
self._y_pred_edges = 0
self._y_edges = 0
self._is_update = False
def _get_surface_distance(self, y_pred_edges, y_edges):
"""
Calculate the surface distances from `y_pred_edges` to `y_edges`.
Args:
y_pred_edges (np.ndarray): the edge of the predictions.
y_edges (np.ndarray): the edge of the ground truth.
"""
if not np.any(y_pred_edges):
return np.array([])
if not np.any(y_edges):
dis = np.full(y_edges.shape, np.inf)
else:
if self.distance_metric == "euclidean":
dis = morphology.distance_transform_edt(~y_edges)
elif self.distance_metric in self.distance_metric_list[-2:]:
dis = morphology.distance_transform_cdt(~y_edges, metric=self.distance_metric)
surface_distance = dis[y_pred_edges]
return surface_distance
@rearrange_inputs
def update(self, *inputs):
"""
Updates the internal evaluation result 'y_pred', 'y' and 'label_idx'.
Args:
inputs: Input 'y_pred', 'y' and 'label_idx'. 'y_pred' and 'y' are Tensor or numpy.ndarray. 'y_pred' is the
predicted binary image. 'y' is the actual binary image. 'label_idx', the data type of `label_idx`
is int.
Raises:
ValueError: If the number of the inputs is not 3.
            TypeError: If the data type of label_idx is not int or float.
            ValueError: If the value of label_idx is not in y_pred or y.
            ValueError: If y_pred and y have different shapes.
"""
if len(inputs) != 3:
            raise ValueError('MeanSurfaceDistance needs 3 inputs (y_pred, y, label), but got {}.'.format(len(inputs)))
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
label_idx = inputs[2]
if not isinstance(label_idx, (int, float)):
raise TypeError("The data type of label_idx must be int or float, but got {}.".format(type(label_idx)))
if label_idx not in y_pred and label_idx not in y:
raise ValueError("The label_idx should be in y_pred or y, but {} is not.".format(label_idx))
if y_pred.size == 0 or y_pred.shape != y.shape:
raise ValueError("y_pred and y should have same shape, but got {}, {}.".format(y_pred.shape, y.shape))
if y_pred.dtype != bool:
y_pred = y_pred == label_idx
if y.dtype != bool:
y = y == label_idx
self._y_pred_edges = morphology.binary_erosion(y_pred) ^ y_pred
self._y_edges = morphology.binary_erosion(y) ^ y
self._is_update = True
def eval(self):
"""
Calculate mean surface distance.
Returns:
A float with mean surface distance.
Raises:
RuntimeError: If the update method is not called first, an error will be reported.
"""
if self._is_update is False:
raise RuntimeError('Call the update method before calling eval.')
mean_surface_distance = self._get_surface_distance(self._y_pred_edges, self._y_edges)
if mean_surface_distance.shape == (0,):
return np.inf
avg_surface_distance = mean_surface_distance.mean()
if not self.symmetric:
return avg_surface_distance
contrary_mean_surface_distance = self._get_surface_distance(self._y_edges, self._y_pred_edges)
if contrary_mean_surface_distance.shape == (0,):
return np.inf
contrary_avg_surface_distance = contrary_mean_surface_distance.mean()
return np.mean((avg_surface_distance, contrary_avg_surface_distance))
| 41.603774 | 119 | 0.636432 |
4a1b18af56e4410fa786f3e4e71e38fe55b3f495
| 5,789 |
py
|
Python
|
old/main_original.py
|
i207M/pytorch-cifar
|
df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
|
[
"MIT"
] | null | null | null |
old/main_original.py
|
i207M/pytorch-cifar
|
df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
|
[
"MIT"
] | null | null | null |
old/main_original.py
|
i207M/pytorch-cifar
|
df4417b6d0a25515ac82b5aa6151ae2135b2cd5c
|
[
"MIT"
] | null | null | null |
'''Train CIFAR10 with PyTorch.'''
import argparse
import os
import time
from pathlib import Path
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from models.resnet56 import ResNet56
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root='./data2', train=True, download=True, transform=transform_train
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=4, pin_memory=True
)
testset = torchvision.datasets.CIFAR10(
root='./data2', train=False, download=True, transform=transform_test
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=100, shuffle=False, num_workers=4, pin_memory=True
)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# net = VGG('VGG19')
# net = ResNet18()
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
# net = EfficientNetB0()
# net = RegNetX_200MF()
# net = SimpleDLA()
net = ResNet56()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/last.pth')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
# Original weight decay = 1e-4
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
# Original scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[100, 150], last_epoch=start_epoch - 1
)
# Log
date_str = time.strftime('%Y_%m_%d-%H_%M_%S', time.localtime())
log_dir = Path('./runs') / date_str
wdir = log_dir / 'weights'
writer = SummaryWriter(log_dir)
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
# progress_bar(
# batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
# (train_loss / (batch_idx + 1), 100. * correct / total, correct, total)
# )
print('#%d, Loss: %.3f | Acc: %.3f' % (epoch, train_loss / len(trainloader), 100. * correct / total))
writer.add_scalar('train/loss', train_loss / len(trainloader), epoch)
writer.add_scalar('train/acc', 100. * correct / total, epoch)
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
# progress_bar(
# batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
# (test_loss / (batch_idx + 1), 100. * correct / total, correct, total)
# )
print('#%d, Loss: %.3f | Acc: %.3f' % (epoch, test_loss / len(testloader), 100. * correct / total))
writer.add_scalar('test/loss', test_loss / len(testloader), epoch)
writer.add_scalar('test/acc', 100. * correct / total, epoch)
# Save checkpoint.
acc = 100. * correct / total
if acc > best_acc:
best_acc = acc
save_checkpoint(wdir / 'best.pth', epoch)
def save_checkpoint(path, epoch: int):
global best_acc
print(f'Saving.. Epoch: {epoch}, Acc: {best_acc}')
state = {
'net': net.state_dict(),
'acc': best_acc,
'epoch': epoch,
}
torch.save(state, path)
if __name__ == '__main__':
os.makedirs(wdir, exist_ok=True)
for epoch in range(start_epoch, start_epoch + 200):
try:
train(epoch)
test(epoch)
scheduler.step()
except KeyboardInterrupt:
break
save_checkpoint(wdir / 'last.pth', epoch)
| 30.62963 | 105 | 0.643807 |
4a1b1a58925e03bd6db6d45cb4654b3c4f2ed010
| 631 |
py
|
Python
|
src/data/727.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/727.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/727.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
n, q = list(map(int, input().split()))
g = [[] for _ in range(n)]
for i in range(n - 1):
a, b = list(map(lambda x: int(x) - 1, input().split()))
g[a].append(b)
g[b].append(a)
from collections import deque
def bfs(v):
q = deque()
q.append(v)
d = [-1] * n
d[v] = 0
while q:
v = q.popleft()
for u in g[v]:
if d[u] != -1: continue
d[u] = d[v] + 1
q.append(u)
return d
a = bfs(0)
for i in range(q):
c, d = list(map(lambda x: int(x) - 1, input().split()))
if (a[c] - a[d]) % 2:
print('Road')
else:
print('Town')
| 17.527778 | 59 | 0.448494 |
4a1b1abe425bc18341d285c25017526ccf41c439
| 714 |
py
|
Python
|
recursion/tower_of_hanoi.py
|
Yasir323/Data-Structures-and-Algorithms-in-Python
|
b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0
|
[
"MIT"
] | null | null | null |
recursion/tower_of_hanoi.py
|
Yasir323/Data-Structures-and-Algorithms-in-Python
|
b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0
|
[
"MIT"
] | null | null | null |
recursion/tower_of_hanoi.py
|
Yasir323/Data-Structures-and-Algorithms-in-Python
|
b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0
|
[
"MIT"
] | null | null | null |
"""
Here is a high-level outline of how to move a tower
from the starting pole, to the goal pole, using an
intermediate pole:
1. Move a tower of height-1 to an intermediate pole,
using the final pole.
2. Move the remaining disk to the final pole.
3. Move the tower of height-1 from the intermediate
pole to the final pole using the original pole.
"""
def move_tower(height, from_tower, to_tower, with_tower):
if height >= 1:
move_tower(height - 1, from_tower, with_tower, to_tower)
move_disk(from_tower, to_tower)
move_tower(height - 1, with_tower, to_tower, from_tower)
def move_disk(ft, tt):
print("Moving disk from", ft, "to", tt)
move_tower(3, "A", "B", "C")
| 24.62069 | 64 | 0.691877 |
4a1b1b684342a34256d9dead36e0fda2baf41185
| 50 |
py
|
Python
|
maths/__init__.py
|
claywahlstrom/pack
|
86b70198a4b185611c2ce3d29df99dd01233a6ac
|
[
"BSD-2-Clause"
] | 2 |
2019-05-04T09:32:15.000Z
|
2021-02-08T08:38:23.000Z
|
maths/__init__.py
|
claywahlstrom/pack
|
86b70198a4b185611c2ce3d29df99dd01233a6ac
|
[
"BSD-2-Clause"
] | null | null | null |
maths/__init__.py
|
claywahlstrom/pack
|
86b70198a4b185611c2ce3d29df99dd01233a6ac
|
[
"BSD-2-Clause"
] | null | null | null |
__all__ = ['core', 'matlab', 'stats', 'vectors']
| 16.666667 | 48 | 0.58 |
4a1b1b6cf30d59ed8421ffd58130b9d143d2c3b2
| 755 |
py
|
Python
|
Listing_13-3.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
Listing_13-3.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
Listing_13-3.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
# Listing_13-3
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Function with two arguments
def printMyAddress(someName, houseNum):
print someName
print houseNum, # Comma makes houseNum and street print on the same line
print "Main Street"
print "Ottawa, Ontario, Canada"
print "K2M 2E9"
print
printMyAddress("Carter Sande", "45") # pass 2 arguments to the function
printMyAddress("Jack Black", "64")
printMyAddress("Tom Green", "22")
printMyAddress("Todd White", "36")
| 37.75 | 90 | 0.569536 |
4a1b1c55564f1e2f33aba74778d6246888c5f918
| 79 |
py
|
Python
|
Database/flask-sqlalchemy/models/__init__.py
|
amamov/cs001
|
5753f28e74e2330837d22142cff4713801c77a2d
|
[
"MIT"
] | 5 |
2021-02-21T17:10:03.000Z
|
2022-03-04T21:17:50.000Z
|
flask-sqlalchemy/models/__init__.py
|
amamov/pythonic
|
95f8f7dca9d01f11ecdf4b26b46afe41dc20b0d0
|
[
"MIT"
] | null | null | null |
flask-sqlalchemy/models/__init__.py
|
amamov/pythonic
|
95f8f7dca9d01f11ecdf4b26b46afe41dc20b0d0
|
[
"MIT"
] | 3 |
2021-02-25T17:53:57.000Z
|
2021-06-25T17:25:44.000Z
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from . import users
| 15.8 | 39 | 0.797468 |
4a1b1e2716cdf57a8f63417cf8d3c97d71062eb5
| 2,577 |
py
|
Python
|
tests/test_migrate.py
|
zalando-nakadi/bubuku
|
5738cc9309ed46e86fcad41b6fb580ddd69af8fd
|
[
"MIT"
] | 32 |
2017-10-17T09:59:46.000Z
|
2022-01-23T11:39:31.000Z
|
tests/test_migrate.py
|
zalando-nakadi/bubuku
|
5738cc9309ed46e86fcad41b6fb580ddd69af8fd
|
[
"MIT"
] | 91 |
2017-07-13T15:43:15.000Z
|
2022-02-21T13:06:35.000Z
|
tests/test_migrate.py
|
zalando-nakadi/bubuku
|
5738cc9309ed46e86fcad41b6fb580ddd69af8fd
|
[
"MIT"
] | 3 |
2018-04-19T13:13:00.000Z
|
2018-09-11T05:59:38.000Z
|
import unittest
from unittest.mock import MagicMock
from bubuku.features.migrate import MigrationChange
class TestMigrate(unittest.TestCase):
def test_migration_all_steps(self):
partitions = {
('test', 0): [1, 2, 3],
('test', 1): [2, 3, 1],
('test1', 0): [3, 2, 1],
}
zk = MagicMock()
zk.is_rebalancing = lambda: False
zk.load_partition_assignment = lambda: [(k[0], k[1], v) for k, v in partitions.items()]
result = {}
def _reallocate_partition(t, p, r):
result.update({(t, p): r})
return True
def _reallocate_partitions(items):
for item in items:
_reallocate_partition(*item)
return True
zk.reallocate_partition = _reallocate_partition
zk.reallocate_partitions = _reallocate_partitions
zk.get_broker_ids = lambda: [1, 2, 3, 4, 5, 6]
change = MigrationChange(zk, [1, 2, 3], [4, 5, 6], False)
while change.run([]):
pass
expected = {
('test', 0): [1, 2, 3, 4, 5, 6],
('test', 1): [2, 3, 1, 5, 6, 4],
('test1', 0): [3, 2, 1, 6, 5, 4],
}
assert expected == result
zk.load_partition_assignment = lambda: [(k[0], k[1], v) for k, v in expected.items()]
result.clear()
change = MigrationChange(zk, [1, 2, 3], [4, 5, 6], True)
while change.run([]):
pass
expected = {
('test', 0): [4, 5, 6],
('test', 1): [5, 6, 4],
('test1', 0): [6, 5, 4],
}
assert expected == result
def test_replica_generation_no_shrink(self):
change = MigrationChange(MagicMock(), [1, 2, 3], [4, 5, 6], False)
assert [4, 5, 6] == change._replace_replicas([4, 5, 6])
assert [1, 2, 3, 4, 5, 6] == change._replace_replicas([1, 2, 3])
assert [1, 2, 6, 4, 5] == change._replace_replicas([1, 2, 6])
assert [1, 6, 2, 4, 5] == change._replace_replicas([1, 6, 2])
assert [1, 6, 3, 4] == change._replace_replicas([1, 6, 3])
def test_replica_generation_shrink(self):
change = MigrationChange(MagicMock(), [1, 2, 3], [4, 5, 6], True)
assert [4, 5, 6] == change._replace_replicas([1, 2, 3])
assert [4, 5, 6] == change._replace_replicas([4, 2, 6])
assert [8, 5, 10] == change._replace_replicas([8, 2, 10])
assert [4, 8, 5] == change._replace_replicas([1, 8, 2])
assert [4, 5, 6] == change._replace_replicas([1, 2, 3, 4, 5, 6])
| 34.824324 | 95 | 0.523089 |
4a1b1e6a5c4183b5bcf79e1e184a2ac7bf214c26
| 903 |
py
|
Python
|
settings/__init__.py
|
anchalghale/auto_disenchanter
|
4edab1b72538b15bf8d665629f951db1612fa825
|
[
"Apache-2.0"
] | 7 |
2021-04-07T17:44:42.000Z
|
2022-02-13T05:47:11.000Z
|
settings/__init__.py
|
anchalghale/auto_disenchanter
|
4edab1b72538b15bf8d665629f951db1612fa825
|
[
"Apache-2.0"
] | 1 |
2021-08-20T09:11:38.000Z
|
2022-02-11T12:54:38.000Z
|
settings/__init__.py
|
anchalghale/auto_disenchanter
|
4edab1b72538b15bf8d665629f951db1612fa825
|
[
"Apache-2.0"
] | 3 |
2019-11-22T06:21:17.000Z
|
2020-06-16T07:25:23.000Z
|
''' Configuration file for the program '''
import time
import json
from types import SimpleNamespace
from urllib.parse import urljoin
import requests
from server import SERVER_URL, SERVER_AUTH
from utils import get_base_dir
BASE_DIR = get_base_dir()
def get_settings(logger, debug=False):
''' Parses the settings locally or from the server according to the debug value '''
if debug:
with open('settings.json') as file:
settings_ = json.load(file)
else:
while True:
try:
logger.log('Parsing settings from the server')
settings_ = requests.get(urljoin(SERVER_URL, 'api/v1/settings/'),
auth=SERVER_AUTH, timeout=30).json()
break
except requests.exceptions.RequestException:
time.sleep(10)
return SimpleNamespace(**settings_)
| 27.363636 | 87 | 0.632337 |
4a1b1eb34a118805355cfac526b9c9e62a90ebb8
| 1,228 |
py
|
Python
|
controk_webservice/employees/migrations/0001_initial.py
|
controk-sys/http-server
|
0be146140daab804fe2300678f5e69d1c9e292d8
|
[
"MIT"
] | null | null | null |
controk_webservice/employees/migrations/0001_initial.py
|
controk-sys/http-server
|
0be146140daab804fe2300678f5e69d1c9e292d8
|
[
"MIT"
] | 2 |
2016-10-25T23:31:58.000Z
|
2016-10-27T02:10:41.000Z
|
controk_webservice/employees/migrations/0001_initial.py
|
jourdanrodrigues/controk-webservice
|
0be146140daab804fe2300678f5e69d1c9e292d8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-16 16:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('addresses', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('mobile', models.CharField(max_length=20, null=True)),
('phone', models.CharField(max_length=20, null=True)),
('cpf', models.CharField(max_length=14)),
('name', models.CharField(max_length=60)),
('observation', models.TextField(null=True)),
('role', models.CharField(max_length=40)),
('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employees', to='addresses.Address')),
],
options={
'db_table': 'Employee',
},
),
]
| 34.111111 | 142 | 0.577362 |
4a1b209c89fc0dcd60563f637e02f0ea1e284eb0
| 1,361 |
py
|
Python
|
radiomicsfeatureextractionpipeline/backend/src/logic/dicom_file_reader/dicom_file_reader.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
radiomicsfeatureextractionpipeline/backend/src/logic/dicom_file_reader/dicom_file_reader.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | 6 |
2021-06-09T19:39:27.000Z
|
2021-09-30T16:41:40.000Z
|
radiomicsfeatureextractionpipeline/backend/src/logic/dicom_file_reader/dicom_file_reader.py
|
Maastro-CDS-Imaging-Group/SQLite4Radiomics
|
e3a7afc181eec0fe04c18da00edc3772064e6758
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List, Optional
from logic.entities.image import Image
class DicomFileReader(ABC):
"""
Abstract base class for implementation of the strategy pattern.
Used for reading DICOM files.
"""
def __init__(self, dicom_data_directory: str) -> None:
"""
        Constructor of the DicomFileReader class.
:param dicom_data_directory: The path to the directory where all DICOM files are stored
"""
self.dicom_data_directory: str = dicom_data_directory
def set_dicom_data_directory(self, dicom_data_directory: str) -> None:
"""
Sets the location of the DICOM files to a new directory
:param dicom_data_directory: The new path to the directory where all DICOM files are stored
"""
self.dicom_data_directory: str = dicom_data_directory
@abstractmethod
def read_dicom_file(self, image: Image) -> Optional[Image]:
"""
Reads the content of one single dicom file.
:param image: The image that needs to be loaded.
"""
pass
@abstractmethod
def read_multiple_dicom_files(self, images: List[Image]) -> Optional[List[Image]]:
"""
Reads the content of multiple dicom files
:param images: List of images that all needs to be loaded.
"""
pass
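# Minimal sketch of a concrete strategy (the subclass name and body here are
# hypothetical; the real implementations live in sibling modules):
#   class PydicomFileReader(DicomFileReader):
#       def read_dicom_file(self, image):
#           ...  # load the pixel data for `image` from self.dicom_data_directory
#       def read_multiple_dicom_files(self, images):
#           return [self.read_dicom_file(image) for image in images]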
| 32.404762 | 99 | 0.667157 |
4a1b209f0e107f59a7948e9169b0698d5ec0368e
| 6,707 |
py
|
Python
|
kubernetes/client/models/v1_env_var_source.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_env_var_source.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_env_var_source.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1EnvVarSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'config_map_key_ref': 'V1ConfigMapKeySelector',
'field_ref': 'V1ObjectFieldSelector',
'resource_field_ref': 'V1ResourceFieldSelector',
'secret_key_ref': 'V1SecretKeySelector'
}
attribute_map = {
'config_map_key_ref': 'configMapKeyRef',
'field_ref': 'fieldRef',
'resource_field_ref': 'resourceFieldRef',
'secret_key_ref': 'secretKeyRef'
}
def __init__(self, config_map_key_ref=None, field_ref=None, resource_field_ref=None, secret_key_ref=None):
"""
V1EnvVarSource - a model defined in Swagger
"""
self._config_map_key_ref = None
self._field_ref = None
self._resource_field_ref = None
self._secret_key_ref = None
self.discriminator = None
if config_map_key_ref is not None:
self.config_map_key_ref = config_map_key_ref
if field_ref is not None:
self.field_ref = field_ref
if resource_field_ref is not None:
self.resource_field_ref = resource_field_ref
if secret_key_ref is not None:
self.secret_key_ref = secret_key_ref
@property
def config_map_key_ref(self):
"""
Gets the config_map_key_ref of this V1EnvVarSource.
Selects a key of a ConfigMap.
:return: The config_map_key_ref of this V1EnvVarSource.
:rtype: V1ConfigMapKeySelector
"""
return self._config_map_key_ref
@config_map_key_ref.setter
def config_map_key_ref(self, config_map_key_ref):
"""
Sets the config_map_key_ref of this V1EnvVarSource.
Selects a key of a ConfigMap.
:param config_map_key_ref: The config_map_key_ref of this V1EnvVarSource.
:type: V1ConfigMapKeySelector
"""
self._config_map_key_ref = config_map_key_ref
@property
def field_ref(self):
"""
Gets the field_ref of this V1EnvVarSource.
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
:return: The field_ref of this V1EnvVarSource.
:rtype: V1ObjectFieldSelector
"""
return self._field_ref
@field_ref.setter
def field_ref(self, field_ref):
"""
Sets the field_ref of this V1EnvVarSource.
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
:param field_ref: The field_ref of this V1EnvVarSource.
:type: V1ObjectFieldSelector
"""
self._field_ref = field_ref
@property
def resource_field_ref(self):
"""
Gets the resource_field_ref of this V1EnvVarSource.
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:return: The resource_field_ref of this V1EnvVarSource.
:rtype: V1ResourceFieldSelector
"""
return self._resource_field_ref
@resource_field_ref.setter
def resource_field_ref(self, resource_field_ref):
"""
Sets the resource_field_ref of this V1EnvVarSource.
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param resource_field_ref: The resource_field_ref of this V1EnvVarSource.
:type: V1ResourceFieldSelector
"""
self._resource_field_ref = resource_field_ref
@property
def secret_key_ref(self):
"""
Gets the secret_key_ref of this V1EnvVarSource.
Selects a key of a secret in the pod's namespace
:return: The secret_key_ref of this V1EnvVarSource.
:rtype: V1SecretKeySelector
"""
return self._secret_key_ref
@secret_key_ref.setter
def secret_key_ref(self, secret_key_ref):
"""
Sets the secret_key_ref of this V1EnvVarSource.
Selects a key of a secret in the pod's namespace
:param secret_key_ref: The secret_key_ref of this V1EnvVarSource.
:type: V1SecretKeySelector
"""
self._secret_key_ref = secret_key_ref
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1EnvVarSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 31.78673 | 220 | 0.632175 |
4a1b2237594d65cbebbfa67e268dc53d2ca0644b
| 13,965 |
py
|
Python
|
neutron/tests/base.py
|
sajuptpm/notification_neutron
|
45933f63c9eff0d2931a7209b040ff2dc69835c5
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/base.py
|
sajuptpm/notification_neutron
|
45933f63c9eff0d2931a7209b040ff2dc69835c5
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/base.py
|
sajuptpm/notification_neutron
|
45933f63c9eff0d2931a7209b040ff2dc69835c5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test cases for all neutron tests.
"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import random
import traceback
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
import testtools
from neutron.agent.linux import external_process
from neutron.callbacks import manager as registry_manager
from neutron.callbacks import registry
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron import policy
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def fake_consume_in_threads(self):
return []
def get_rand_name(max_length=None, prefix='test'):
name = prefix + str(random.randint(1, 0x7fffffff))
return name[:max_length] if max_length is not None else name
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
def get_test_timeout(default=0):
    return int(os.environ.get('OS_TEST_TIMEOUT', default))
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
raise AttributeError(_("Unknown attribute '%s'.") % name)
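# For example: AttributeDict(port_id='abc').port_id returns 'abc', while
# accessing a missing key such as .fake_key raises AttributeError.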
class DietTestCase(testtools.TestCase):
"""Same great taste, less filling.
BaseTestCase is responsible for doing lots of plugin-centric setup
that not all tests require (or can tolerate). This class provides
only functionality that is common across all tests.
"""
def setUp(self):
super(DietTestCase, self).setUp()
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
if bool_from_env('OS_DEBUG'):
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = bool_from_env('OS_LOG_CAPTURE')
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
test_timeout = get_test_timeout()
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(mock.patch.stopall)
if bool_from_env('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if bool_from_env('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.addOnException(self.check_for_systemexit)
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
self.fail("A SystemExit was raised during the test. %s"
% traceback.format_exception(*exc_info))
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in dic.iteritems():
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
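        # For example, self.assertDictSupersetOf({'a': 1}, {'a': 1, 'b': 2})
        # passes, while swapping the two arguments fails on the missing key 'b'.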
class ProcessMonitorFixture(fixtures.Fixture):
"""Test fixture to capture and cleanup any spawn process monitor."""
def setUp(self):
super(ProcessMonitorFixture, self).setUp()
self.old_callable = (
external_process.ProcessMonitor._spawn_checking_thread)
p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
"_spawn_checking_thread",
new=lambda x: self.record_calls(x))
p.start()
self.instances = []
self.addCleanup(self.stop)
def stop(self):
for instance in self.instances:
instance.stop()
def record_calls(self, instance):
self.old_callable(instance)
self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf.test includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf.test')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# suppress all but errors here
capture_logs = bool_from_env('OS_LOG_CAPTURE')
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
self.useFixture(lockutils.ExternalLockFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.useFixture(fixtures.MonkeyPatch(
'oslo_config.cfg.find_config_files',
lambda project=None, prog=None, extension=None: []))
self.setup_rpc_mocks()
self.setup_config()
self.setup_test_registry_instance()
policy.init()
self.addCleanup(policy.reset)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
        If root is None, the file is created in the default temporary directory. It
also creates the directory if it's not initialized yet.
If root is not None, the file is created inside the directory passed as
root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_test_registry_instance(self):
"""Give a private copy of the registry to each test."""
self._callback_manager = registry_manager.CallbacksManager()
mock.patch.object(registry, '_get_callback_manager',
return_value=self._callback_manager).start()
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
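        # For example (option names are placeholders): self.config(debug=True)
        # or self.config(some_opt=10, group='SOME_GROUP') for a grouped option.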
def setup_coreplugin(self, core_plugin=None):
self.useFixture(PluginFixture(core_plugin))
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
class PluginFixture(fixtures.Fixture):
def __init__(self, core_plugin=None):
self.core_plugin = core_plugin
def setUp(self):
super(PluginFixture, self).setUp()
self.dhcp_periodic_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'start_periodic_dhcp_agent_status_check')
self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if self.core_plugin is not None:
cfg.CONF.set_override('core_plugin', self.core_plugin)
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
raise AssertionError(
'The plugin for this test was not deallocated.')
| 34.825436 | 79 | 0.664805 |
4a1b22d8846974ad89a45d16482ba3fdfaee0673
| 2,645 |
py
|
Python
|
foxlink/non_dimensionalizer.py
|
lamsoa729/FoXlink
|
3c061b02968cdab1def752d5c145a6df4615504b
|
[
"BSD-3-Clause"
] | null | null | null |
foxlink/non_dimensionalizer.py
|
lamsoa729/FoXlink
|
3c061b02968cdab1def752d5c145a6df4615504b
|
[
"BSD-3-Clause"
] | null | null | null |
foxlink/non_dimensionalizer.py
|
lamsoa729/FoXlink
|
3c061b02968cdab1def752d5c145a6df4615504b
|
[
"BSD-3-Clause"
] | 2 |
2019-06-18T16:48:03.000Z
|
2019-06-20T23:50:02.000Z
|
#!/usr/bin/env python
"""@package docstring
File: non_dimensionalizer.py
Author: Adam Lamson
Email: adam.lamson@colorado.edu
Description:
"""
import numpy as np
from copy import deepcopy as dcp
class NonDimensionalizer():
"""!Class to non-dimensionalize parameters."""
def __init__(self, **kwargs):
"""!Take in the necessary values to non-dimensionalize any quantitiy.
@param kwargs
"""
self.__dict__.update(kwargs)
def non_dim_val(self, value, dim_list, exp_list=None):
"""!Non-dimensionalize value based on dimensions and dimension powers.
        @param value: Value you wish to convert to a dimensionless quantity. May be a numpy array.
@param dim_list: Dimensions. Must be in __dict__ of class
@param exp_list: Exponent of dimensions given.
@return: non-dimensionalized value
"""
if exp_list is None:
exp_list = [1]
val = dcp(value)
for dim, exp in zip(dim_list, exp_list):
if dim not in self.__dict__:
raise RuntimeError(
"Dimension ({}) not in NonDimensionalizer.".format(dim))
val /= np.power(self.__dict__[dim], exp)
print(val)
return val
def dim_val(self, value, dim_list, exp_list=None):
"""!Non-dimensionalize value based on units and dim
@param value: Value you wish to convert from dimensionless quantity to
a dimensionful quanity. Maybe a numpy array.
@param dim_list: Dimensions. Must be in __dict__ of class
@param exp_list: Exponent of dimensions given.
@return: TODO
"""
if exp_list is None:
exp_list = [1]
val = dcp(value)
for dim, exp in zip(dim_list, exp_list):
if dim not in self.__dict__:
raise RuntimeError(
"Dimension ({}) not in NonDimensionalizer.".format(dim))
val *= np.power(self.__dict__[dim], exp)
return val
def calc_new_dim(self, dim_name, dim_list, exp_list):
"""!Calculate new dimension based off current dimensions that already
exist and store it as a property.
@param dim_name: TODO
@param dim_list: TODO
@param exp_list: TODO
@return: void
"""
val = 1.
for dim, exp in zip(dim_list, exp_list):
if dim not in self.__dict__:
raise RuntimeError(
"Dimension ({}) not in NonDimensionalizer.".format(dim))
val *= np.power(self.__dict__[dim], exp)
self.__dict__[dim_name] = val
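# Worked example (illustrative values only): with a stored length scale of 2.0,
#   nd = NonDimensionalizer(length=2.0)
#   nd.non_dim_val(10.0, ['length'])                        # -> 5.0 (also printed)
#   nd.dim_val(5.0, ['length'])                             # -> 10.0
#   nd.calc_new_dim('area', ['length', 'length'], [1, 1])   # nd.area == 4.0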
| 31.86747 | 97 | 0.603403 |
4a1b231201c8658990c96bb22066f0532c85447b
| 18,732 |
py
|
Python
|
xor/symcollab/xor/xorhelper.py
|
symcollab/CryptoSolve
|
fc5c709cfeb9de7728b7baddc5d5da60d13ede0a
|
[
"BSD-3-Clause"
] | 1 |
2021-02-09T20:06:29.000Z
|
2021-02-09T20:06:29.000Z
|
xor/symcollab/xor/xorhelper.py
|
symcollab/CryptoSolve
|
fc5c709cfeb9de7728b7baddc5d5da60d13ede0a
|
[
"BSD-3-Clause"
] | null | null | null |
xor/symcollab/xor/xorhelper.py
|
symcollab/CryptoSolve
|
fc5c709cfeb9de7728b7baddc5d5da60d13ede0a
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import deepcopy
from symcollab.algebra import Constant, Variable, FuncTerm, Equation, SubstituteTerm
from symcollab.Unification.unif import unif
from .structure import Zero, XORTerm, Equations, Disequations, Disequation
from .xor import xor
def is_xor_term(t):
#return (isinstance(t, FuncTerm)) and (isinstance(t.function, Xor))
if(isinstance(t, Zero)):
return False
else:
return (isinstance(t, FuncTerm) and t.function.symbol == "xor")
def is_XOR_Term(t):
return isinstance(t, XORTerm)
def xor_to_list(t):
#convert a xor-term to a list of terms
if(not is_xor_term(t)):
return [t]
else:
lst1 = xor_to_list(t.arguments[0])
lst2 = xor_to_list(t.arguments[1])
return lst1 + lst2
def simplify(lst):
result = []
for item in lst:
if(isinstance(item, Zero)):
continue
if(item in result):
result.remove(item)
else:
result.append(item)
return result
def simplify_XOR_Term(t):
simplified_arguments = simplify(t.arguments)
if(len(simplified_arguments) == 0):
return Zero()
elif(len(simplified_arguments) == 1):
return simplified_arguments[0]
else:
return XORTerm(simplified_arguments)
def list_to_xor(lst):
#convert a list of terms to a xor-term
if(len(lst) == 0):
return Zero()
elif(len(lst) == 1):
return lst[0]
else:
head, *tail = lst
return xor(head, list_to_xor(tail))
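# Round-trip example (assuming a, b, c are algebra terms such as Constants):
# xor_to_list(xor(a, xor(b, c))) gives [a, b, c], simplify([a, b, a]) cancels
# the duplicate pair to [b], and list_to_xor([a, b, c]) rebuilds xor(a, xor(b, c)).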
def collect_all_variables_in_term(t):
#Returns a list of variables in a term.
#f(x, g(y)) ==> [x,y]
if (isinstance(t, Zero)):
return []
elif (isinstance(t, Constant)):
return []
elif (isinstance(t, Variable)):
return [t]
elif (isinstance(t, FuncTerm)):
result = []
for arg in t.arguments:
result = result + collect_all_variables_in_term(arg)
return list(set(result))
'''
elif (isinstance(t, XORTerm)):
result = []
for arg in t.arguments:
result = result + collect_all_variables_in_term(arg)
return list(set(result))
'''
def collect_all_variables_in_equation(eq):
lhs = eq.left_side
rhs = eq.right_side
all_variables = collect_all_variables_in_term(lhs) + collect_all_variables_in_term(rhs)
return list(set(all_variables))
def collect_all_unconstrained_variables_in_equation(eq):
variables = collect_all_variables_in_equation(eq)
    results = set()
for var in variables:
if (not is_constrained_in_equation(var, eq)):
results.add(var)
return results
def look_up_a_name(t, eqs):
#t is a xor-term, eqs is a set of equations.
#This function looks t up in eqs, and checks if t already has a name in eqs.
#If so, it returns the existing name. Otherwise, it returns None.
first_term = t.arguments[0]
second_term = t.arguments[1]
for eq in eqs:
rhs = eq.right_side
if(is_xor_term(rhs)):
fst_t = rhs.arguments[0]
snd_t = rhs.arguments[1]
possibility1 = first_term == fst_t and second_term == snd_t
possibility2 = first_term == snd_t and second_term == fst_t
if(possibility1 or possibility2):
return eq.left_side
return None
def name_xor_terms(t, eqs):
# Purify arguments
# Name top-level xor-subterms
# Return (renamed_term, a set of equations)
if (isinstance(t, Zero)):
return (t, eqs)
elif (isinstance(t, Constant)):
return (t, eqs)
elif (isinstance(t, Variable)):
return (t, eqs)
elif(is_xor_term(t)):
(term1, eqs) = purify_a_term(t.arguments[0], eqs)
#eqs = eqs + equation1
(term2, eqs) = purify_a_term(t.arguments[1], eqs)
#eqs = eqs + equation2
#equations = equation1 + equation2
name = look_up_a_name(xor(term1, term2), eqs)
if(name != None):
return (name, eqs)
else:
global variable_counter
variable_counter += 1
new_variable = Variable("N" + str(variable_counter))
eqs.append(Equation(new_variable, xor(term1, term2)))
return (new_variable, eqs)
elif (isinstance(t, FuncTerm)):
terms = []
for arg in t.arguments:
(term, eqs) = name_xor_terms(arg, eqs)
#eqs = eqs + equations
terms.append(term)
return (FuncTerm(t.function, terms), eqs)
else:
print("error")
return None
def purify_a_term(t, eqs):
if (isinstance(t, Zero)):
return (t, eqs)
elif (isinstance(t, Constant)):
return (t, eqs)
elif (isinstance(t, Variable)):
return (t, eqs)
elif (is_xor_term(t)):
(left, eqs) = purify_a_term(t.arguments[0], eqs)
#eqs = eqs + equation1
(right, eqs) = purify_a_term(t.arguments[1], eqs)
#eqs = eqs + equation2
return (xor(left, right), eqs)
elif(isinstance(t, FuncTerm)):
terms = []
for arg in t.arguments:
(term, eqs) = name_xor_terms(arg, eqs)
#eqs = eqs + equations
terms.append(term)
return (FuncTerm(t.function, terms), eqs)
else:
print("error")
return None
def purify_an_equation(eq, eqs):
(purified_lhs, eqs) = purify_a_term(eq.left_side, eqs)
#eqs = eqs + eqs1
(purified_rhs, eqs) = purify_a_term(eq.right_side, eqs)
#eqs = eqs + eqs2
new_equation = Equation(purified_lhs, purified_rhs)
return (new_equation, eqs)
def purify_equations(eqs):
equations = []
for eq in eqs.contents:
(left, equations) = purify_an_equation(eq, equations)
equations.append(left)
#equations = equations + right
return Equations(equations)
def is_constrained_in_term(v, t):
# Check if the current variable is constrained in t
# That is, check if it appears underneath a function symbol.
#Precondition: t should be purified.
if (isinstance(t, Zero)):
return False
elif (isinstance(t, Constant)):
return False
elif (isinstance(t, Variable)):
return False
elif(is_xor_term(t)):
left = is_constrained_in_term(v, t.arguments[0])
right = is_constrained_in_term(v, t.arguments[1])
return left or right
elif (isinstance(t, FuncTerm)):
return (v in t)
def is_constrained_in_equation(v, eq):
lhs = eq.left_side
rhs = eq.right_side
return is_constrained_in_term(v, lhs) or is_constrained_in_term(v, rhs)
def is_constrained_in_equations(v, eqs):
result = False
equations = eqs.contents
for eq in equations:
result = result or is_constrained_in_equation(v, eq)
return result
def normalize_an_equation(eq):
if(isinstance(eq.right_side,Zero)):
lhs = eq.left_side
else:
lhs = xor(eq.left_side, eq.right_side)
lst = xor_to_list(lhs)
lst = simplify(lst)
new_lhs = list_to_xor(lst)
new_rhs = Zero()
new_eq = Equation(new_lhs, new_rhs)
return new_eq
def normalize_equations(eqs):
equations = eqs.contents
new_eqs = []
for eq in equations:
new_eqs.append(normalize_an_equation(eq))
return Equations(new_eqs)
def apply_sub_to_equation(eq, sub):
lhs = eq.left_side
rhs = eq.right_side
return Equation(lhs * sub, rhs * sub)
def apply_sub_to_disequation(diseq, sub):
lhs = diseq.left_side
rhs = diseq.right_side
return Disequation(lhs * sub, rhs * sub)
def apply_sub_to_equations(eqs, sub):
equations = eqs.contents
result = []
for eq in equations:
result.append(apply_sub_to_equation(eq, sub))
return Equations(result)
def apply_sub_to_disequations(diseqs, sub):
disequations = diseqs.contents
result = []
for diseq in disequations:
result.append(apply_sub_to_disequation(diseq, sub))
return Equations(result)
class XOR_proof_state:
def __init__(self, equations, disequations, substitution):
self.equations = equations
self.disequations = disequations
self.substitution = substitution
def normalize(self):
eqs = self.equations
diseqs = self.disequations
substs = self.substitution
eqs = normalize_equations(eqs)
return XOR_proof_state(eqs, diseqs, substs)
def __repr__(self):
return str(self.equations) + " | " + str(self.disequations) + " | " + str(self.substitution)
class XOR_Rule:
def __init__(self):
pass
def is_applicable(self, state):
pass
def apply(self, state):
pass
class Rule_Trivial(XOR_Rule):
def __init__(self):
pass
def is_applicable(self, state):
eqs = state.equations.contents
for eq in eqs:
if (isinstance(eq.left_side, Zero) and isinstance(eq.right_side, Zero)):
return True
return False
def apply(self, state):
eqs = state.equations.contents
diseqs = state.disequations
substs = state.substitution
for index in range(len(eqs)):
eq = eqs[index]
if (isinstance(eq.left_side, Zero) and isinstance(eq.right_side, Zero)):
del eqs[index]
return XOR_proof_state(Equations(eqs), diseqs, substs)
class Rule_Subst(XOR_Rule):
def is_applicable(self, state):
#Check if the "Variable Substitution" rule is applicable.
eqs = state.equations.contents
for eq in eqs:
all_variables = collect_all_variables_in_equation(eq)
for var in all_variables:
if(not is_constrained_in_equations(var, state.equations)):
return True
return False
def build_a_substitution_from(self, v, eq):
#Build a substitution from equation eq. The domain is variable v.
#For example, if v: x, eq: f(y) + a = x + b,
#then, the result would be: x |-> f(y) + a + b
#Assume that there are no duplicate terms in eq.
args = xor_to_list(eq.left_side)
for arg in args:
if(v == arg):
args.remove(arg)
range = list_to_xor(args)
sigma = SubstituteTerm()
sigma.add(v, range)
return sigma
def get_a_substitution(self, state):
#Precondition: Variable Substitution rule is applicable.
#Postcondition: It returns a substitution of the form x |-> t, where x is not constrained.
#It also remove that equation from state.equations.
eqs = state.equations.contents
for eq in eqs:
all_variables = collect_all_variables_in_equation(eq)
for var in all_variables:
if (not is_constrained_in_equations(var, state.equations)):
subst = self.build_a_substitution_from(var, eq)
state.equations.contents.remove(eq)
return subst
return None
def apply(self, state):
#need to be reduced
subst = self.get_a_substitution(state)
new_eqs = apply_sub_to_equations(state.equations, subst)
new_eqs = normalize_equations(new_eqs)
#new_diseqs = apply_sub_to_disequations(state.disequations, subst)
state.substitution = state.substitution * subst
return XOR_proof_state(new_eqs, state.disequations, state.substitution)
class Rule_N_Decompose(XOR_Rule):
def __init__(self):
pass
def consistent_with_one_disequation(self, t1, t2, diseq):
#Assume that the terms have been reduced.
        #In other words, there are no duplicate xor-subterms.
if(t1 == diseq.left_side and t2 == diseq.right_side):
return False
if (t2 == diseq.left_side and t1 == diseq.right_side):
return False
return True
def consistent_with_disequations(self, t1, t2, diseqs):
# Assume that the terms have been reduced.
        # In other words, there are no duplicate xor-subterms.
result = True
for diseq in diseqs:
result = result and self.consistent_with_one_disequation(t1, t2, diseq)
return result
def is_applicable(self, state):
result = self.get_equation_and_two_terms(state)
return result != None
def get_equation_and_two_terms(self, state):
eqs = state.equations.contents
#substs = state.substitution
diseqs = state.disequations.contents
def same_function(t1, t2):
v1 = isinstance(t1, FuncTerm)
v2 = isinstance(t2, FuncTerm)
return v1 and v2 and (t1.function.symbol == t2.function.symbol)
for index in range(len(eqs)):
seen_terms = []
all_xor_terms = xor_to_list(eqs[index].left_side)
for new_term in all_xor_terms:
for seen_term in seen_terms:
if (same_function(new_term, seen_term) and self.consistent_with_disequations(new_term, seen_term, diseqs)):
return (index, new_term, seen_term)
seen_terms.append(new_term)
return None
def apply(self, state):
#Apply a rule to some equation
#Assume that the N-Decomposition rule is applicable.
#Returns two states
equations = state.equations.contents
disequations = state.disequations.contents
substs = state.substitution
(eq_index, first_term, second_term) = self.get_equation_and_two_terms(state)
equations1 = equations
equations2 = deepcopy(equations)
equation_to_be_processed = equations1[eq_index]
del equations1[eq_index]
#Remove first_term and second_term from new_equation.
new_term = xor(equation_to_be_processed.left_side, equation_to_be_processed.right_side)
term_list = xor_to_list(new_term)
new_lhs = []
for t in term_list:
if(t == first_term or t == second_term):
continue
else:
new_lhs.append(t)
if(len(new_lhs) != 0):
new_equation = Equation(list_to_xor(new_lhs), Zero())
else:
new_equation = Equation(Zero(), Zero())
equations1.append(new_equation)
unifier = unif(first_term, second_term)
#equations1.append(Equation(xor(first_term.arg, second_term.arg), Zero()))
#if((first_term == second_term) or (unifier.domain() != [])):
# unifiable = True
#else:
# unifiable = False
if(unifier == False):
unifiable = False
else:
unifiable = True
disequations1 = disequations
disequations2 = deepcopy(disequations)
new_disequation = Disequation(first_term, second_term)
disequations2.append(new_disequation)
state2 = XOR_proof_state(Equations(equations2), Disequations(disequations2), substs)
if(unifiable):
state1 = XOR_proof_state(apply_sub_to_equations(Equations(equations1), unifier),apply_sub_to_disequations(Disequations(disequations1), unifier), substs * unifier)
return (unifiable, state1, state2)
else:
return (unifiable, state, state2)
class Rule_Decompose(XOR_Rule):
def __init__(self):
pass
def has_two_arguments(self, t):
if(is_xor_term(t)):
left_is_not_xor = not is_xor_term(t.arguments[0])
right_is_not_xor = not is_xor_term(t.arguments[1])
return left_is_not_xor and right_is_not_xor
def is_of_form_f_f(self, eq):
lhs = eq.left_side
if(self.has_two_arguments(lhs)):
first = lhs.arguments[0]
second = lhs.arguments[1]
lhs_is_func_term = isinstance(first, FuncTerm) and (not isinstance(first, Constant))
rhs_is_func_term = isinstance(second, FuncTerm) and (not isinstance(second, Constant))
return lhs_is_func_term and rhs_is_func_term and (first.function.symbol == second.function.symbol)
else:
return False
def is_applicable(self, state):
eqs = state.equations.contents
for eq in eqs:
if(self.is_of_form_f_f(eq)):
return eq
return False
def apply(self, state):
eq = self.is_applicable(state)
first = eq.left_side.arguments[0]
second = eq.left_side.arguments[1]
sigma = unif(first, second)
if(sigma == False):
return (False, None)
else:
new_eqs = apply_sub_to_equations(state.equations, sigma)
new_eqs = normalize_equations(new_eqs)
state.substitution = state.substitution * sigma
return (True, XOR_proof_state(new_eqs, state.disequations, state.substitution))
def xor_unification_helper(state):
#Returns a list of substitutions
state = state.normalize()
#print("Here is a state: ")
#print(state)
eqs = state.equations
substs = state.substitution
diseqs = state.disequations
if (len(eqs.contents) == 0): #unifiable
return [substs]
trivial_rule = Rule_Trivial()
subst_rule = Rule_Subst()
n_decompose_rule = Rule_N_Decompose()
decompose_rule = Rule_Decompose()
#if no rule is applicable, return []
#otherwise take an inference step
#trivial_applicable = trivial_rule.is_applicable(state)
#subst_applicable = subst_rule.is_applicable(state)
#decompose_applicable = decompose_rule.is_applicable(state)
#
#if(not (trivial_applicable or subst_applicable or decompose_applicable)):
# return [] #not unifiable
if(trivial_rule.is_applicable(state)):
new_state = trivial_rule.apply(state)
return xor_unification_helper(new_state)
elif(decompose_rule.is_applicable(state)):
(unifiable, new_state) = decompose_rule.apply(state)
if(unifiable):
return xor_unification_helper(new_state)
else:
return []
elif(subst_rule.is_applicable(state)):
new_state = subst_rule.apply(state)
return xor_unification_helper(new_state)
elif(n_decompose_rule.is_applicable(state)):
(unifiable, state1, state2) = n_decompose_rule.apply(state)
if(unifiable):
return xor_unification_helper(state1) + xor_unification_helper(state2)
else:
return xor_unification_helper(state2)
else:
return []
def xor_unification(eqs):
equations = purify_equations(eqs)
diseqs = Disequations([])
subst = SubstituteTerm()
state = XOR_proof_state(equations, diseqs, subst)
solutions = xor_unification_helper(state)
return solutions
variable_counter = 0
| 33.873418 | 174 | 0.632928 |
4a1b242902e4f942b1ff3e2cab447ec387c49fb4
| 1,469 |
py
|
Python
|
pbctf/2020/blacklist/genfiles.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 65 |
2019-10-07T01:29:16.000Z
|
2022-03-18T14:20:40.000Z
|
pbctf/2020/blacklist/genfiles.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
pbctf/2020/blacklist/genfiles.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 12 |
2020-05-04T01:16:53.000Z
|
2022-01-02T14:33:41.000Z
|
import string, random, os, sys
pool = string.ascii_letters + string.digits
random.seed(open('/dev/random', 'rb').read(16))
flag_parts = ['F', 'f', 'L', 'l', 'A', 'a', 'G', 'g', '70', '102', '76', '108',
'65', '97', '71', '103', '0x46', '0x66', '0x4c', '0x6c', '0x41', '0x61',
'0x47', '0x67', 'fl', 'la', 'ag', 'fla', 'lag', 'flag', 'FLAG', 'FLA',
'LAG', 'FL', 'LA', 'AG']
def randint(x):
return random.randint(0, x - 1)
def randstr():
msg = ''
for i in range(25):
msg += pool[randint(len(pool))]
return msg
def main():
if len(sys.argv) != 2:
print('Usage: {} FLAG_VALUE'.format(sys.argv[0]))
exit(1)
flag = open(sys.argv[1], 'r').read().strip()
def flatten(aval, bval, cval):
return aval * 50 + bval * 5 + cval
base = 'flag_dir'
os.mkdir(base)
flagpos = randint(len(flag_parts) * 50)
for a in range(len(flag_parts)):
aa = base + '/' + flag_parts[a]
os.mkdir(aa)
for b in range(10):
bb = aa + '/' + randstr()
os.mkdir(bb)
for c in range(5):
cc = bb + '/' + randstr()
with open(cc, 'w') as f:
if flatten(a, b, c) == flagpos:
print('Flag is located at: ./' + cc)
f.write(flag + '\n')
else:
f.write(randstr() + '\n')
if __name__ == '__main__':
main()
| 28.803922 | 80 | 0.456773 |
4a1b258e84b18cdb2108998a5759b3ae9338eeec
| 1,380 |
py
|
Python
|
boards/metro_m4_express/3.x/lib/adafruit_hid/__init__.py
|
jepler/circuitpython-default-files
|
04a9134e41a8766fc7c5ce3666d433ddedfa6ed6
|
[
"MIT"
] | 9 |
2019-03-15T02:47:49.000Z
|
2022-01-20T05:54:08.000Z
|
infra/libs-400rc2-20190512/lib/adafruit_hid/__init__.py
|
jadudm/feather-isa
|
b7419e6698c3f64be4d8122656eb8124631ca859
|
[
"MIT"
] | 3 |
2019-11-07T03:37:40.000Z
|
2021-02-09T20:09:39.000Z
|
lib/adafruit_hid/__init__.py
|
stoicsimian/gameclock
|
3d22c5032dc0139d1ca2f3c29d4a425ed8521a3d
|
[
"MIT"
] | 14 |
2019-04-07T20:30:27.000Z
|
2022-01-20T05:54:10.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_hid`
====================================================
This driver simulates USB HID devices. Currently keyboard and mouse are implemented.
* Author(s): Scott Shawcroft, Dan Halbert
"""
| 44.516129 | 85 | 0.728261 |
4a1b274e1ae2fba791921111ffefa8e3eb237de0
| 826 |
py
|
Python
|
Own/Python/Tutorials/Lists.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
Own/Python/Tutorials/Lists.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
Own/Python/Tutorials/Lists.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
#Cristian Chitiva
#cychitivav@unal.edu.co
#12/Sept/2018
myList = ['Hi', 5, 6 , 3.4, "i"] #Create the list
myList.append([4, 5]) #Add sublist [4, 5] to myList
myList.insert(2,"f") #Add "f" in the position 2
print(myList)
myList = [1, 3, 4, 5, 23, 4, 3, 222, 454, 6445, 6, 4654, 455]
myList.sort() #Sort the list from lowest to highest
print(myList)
myList.sort(reverse = True) #Sort the list from highest to lowest
print(myList)
myList.extend([5, 77]) #Add 5 and 77 to myList
print(myList)
#List comprehension
myList = []
for value in range(0, 50):
myList.append(value)
print(myList)
myList = ["f" for value in range(0,20)]
print(myList)
myList = [value for value in range(0,20)]
print(myList)
myList = [value for value in range(0,60,3) if value % 2 == 0]
print(myList)
| 25.030303 | 68 | 0.641646 |
4a1b28f49024fb7770d30f0774f5dc01174781f6
| 14,545 |
py
|
Python
|
droplet/server/executor/call.py
|
xcharleslin/droplet
|
3124f60b3621df83a22e6dd14dce853330455340
|
[
"Apache-2.0"
] | null | null | null |
droplet/server/executor/call.py
|
xcharleslin/droplet
|
3124f60b3621df83a22e6dd14dce853330455340
|
[
"Apache-2.0"
] | null | null | null |
droplet/server/executor/call.py
|
xcharleslin/droplet
|
3124f60b3621df83a22e6dd14dce853330455340
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from anna.lattices import (
Lattice,
MapLattice,
MultiKeyCausalLattice,
SetLattice,
SingleKeyCausalLattice,
VectorClock
)
from droplet.server.executor import utils
import droplet.server.utils as sutils
from droplet.shared.proto.droplet_pb2 import (
DagTrigger,
FunctionCall,
NORMAL, MULTI, # Droplet's consistency modes,
EXECUTION_ERROR, FUNC_NOT_FOUND # Droplet's error types
)
from droplet.shared.reference import DropletReference
from droplet.shared.serializer import Serializer
serializer = Serializer()
def exec_function(exec_socket, kvs, user_library, cache):
call = FunctionCall()
call.ParseFromString(exec_socket.recv())
fargs = [serializer.load(arg) for arg in call.arguments.values]
f = utils.retrieve_function(call.name, kvs, call.consistency)
if not f:
logging.info('Function %s not found! Returning an error.' %
(call.name))
sutils.error.error = FUNC_NOT_FOUND
result = ('ERROR', sutils.error.SerializeToString())
else:
try:
if call.consistency == NORMAL:
result = _exec_func_normal(kvs, f, fargs, user_library, cache)
else:
dependencies = {}
result = _exec_func_causal(kvs, f, fargs, user_library,
dependencies=dependencies)
except Exception as e:
logging.exception('Unexpected error %s while executing function.' %
(str(e)))
sutils.error.error = EXECUTION_ERROR
result = ('ERROR: ' + str(e), sutils.error.SerializeToString())
if call.consistency == NORMAL:
result = serializer.dump_lattice(result)
succeed = kvs.put(call.response_key, result)
else:
result = serializer.dump_lattice(result, MultiKeyCausalLattice,
causal_dependencies=dependencies)
succeed = kvs.causal_put(call.response_key, result)
if not succeed:
logging.info(f'Unsuccessful attempt to put key {call.response_key} '
+ 'into the KVS.')
def _exec_func_normal(kvs, func, args, user_lib, cache):
refs = list(filter(lambda a: isinstance(a, DropletReference), args))
if refs:
refs = _resolve_ref_normal(refs, kvs, cache)
return _run_function(func, refs, args, user_lib)
def _exec_func_causal(kvs, func, args, user_lib, schedule=None,
key_version_locations={}, dependencies={}):
refs = list(filter(lambda a: isinstance(a, DropletReference), args))
if refs:
refs = _resolve_ref_causal(refs, kvs, schedule, key_version_locations,
dependencies)
return _run_function(func, refs, args, user_lib)
def _run_function(func, refs, args, user_lib):
# Set the first argument to the user library.
func_args = (user_lib,)
# If any of the arguments are references, we insert the resolved reference
# instead of the raw value.
for arg in args:
if isinstance(arg, DropletReference):
func_args += (refs[arg.key],)
else:
func_args += (arg,)
res = func(*func_args)
return res
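# Illustrative sketch (argument values invented): if args is [3, ref] where ref
# is a DropletReference whose .key is 'k', and refs resolved that key to 42,
# then func is invoked as func(user_lib, 3, 42).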
def _resolve_ref_normal(refs, kvs, cache):
deserialize_map = {}
kv_pairs = {}
keys = set()
for ref in refs:
deserialize_map[ref.key] = ref.deserialize
if ref.key in cache:
kv_pairs[ref.key] = cache[ref.key]
else:
keys.add(ref.key)
keys = list(keys)
if len(keys) != 0:
returned_kv_pairs = kvs.get(keys)
# When chaining function executions, we must wait, so we check to see
# if certain values have not been resolved yet.
while None in returned_kv_pairs.values():
returned_kv_pairs = kvs.get(keys)
for key in keys:
# Because references might be repeated, we check to make sure that
# we haven't already deserialized this ref.
if deserialize_map[key] and isinstance(returned_kv_pairs[key],
Lattice):
kv_pairs[key] = serializer.load_lattice(returned_kv_pairs[key])
else:
kv_pairs[key] = returned_kv_pairs[key]
# Cache the deserialized payload for future use
cache[key] = kv_pairs[key]
return kv_pairs
def _resolve_ref_causal(refs, kvs, schedule, key_version_locations,
dependencies):
if schedule:
future_read_set = _compute_children_read_set(schedule)
client_id = schedule.client_id
consistency = schedule.consistency
else:
future_read_set = set()
client_id = 0
consistency = MULTI
keys = [ref.key for ref in refs]
(address, versions), kv_pairs = kvs.causal_get(keys, future_read_set,
key_version_locations,
consistency, client_id)
while None in kv_pairs.values():
(address, versions), kv_pairs = kvs.causal_get(keys, future_read_set,
key_version_locations,
consistency, client_id)
if address is not None:
if address not in key_version_locations:
key_version_locations[address] = versions
else:
key_version_locations[address].extend(versions)
for key in kv_pairs:
if key in dependencies:
dependencies[key].merge(kv_pairs[key].vector_clock)
else:
dependencies[key] = kv_pairs[key].vector_clock
for ref in refs:
key = ref.key
if ref.deserialize:
# In causal mode, you can only use these two lattice types.
if (isinstance(kv_pairs[key], SingleKeyCausalLattice) or
isinstance(kv_pairs[key], MultiKeyCausalLattice)):
# If there are multiple values, we choose the first one listed
# at random.
kv_pairs[key] = serializer.load_lattice(kv_pairs[key])[0]
else:
raise ValueError(('Invalid lattice type %s encountered when' +
' executing in causal mode.') %
str(type(kv_pairs[key])))
return kv_pairs
def exec_dag_function(pusher_cache, kvs, triggers, function, schedule,
user_library, dag_runtimes, cache):
if schedule.consistency == NORMAL:
finished = _exec_dag_function_normal(pusher_cache, kvs,
triggers, function, schedule,
user_library, cache)
else:
finished = _exec_dag_function_causal(pusher_cache, kvs,
triggers, function, schedule,
user_library)
# If finished is true, that means that this executor finished the DAG
# request. We will report the end-to-end latency for this DAG if so.
if finished:
dname = schedule.dag.name
if dname not in dag_runtimes:
dag_runtimes[dname] = []
runtime = time.time() - schedule.start_time
dag_runtimes[schedule.dag.name].append(runtime)
def _construct_trigger(sid, fname, result):
trigger = DagTrigger()
trigger.id = sid
trigger.source = fname
if type(result) != tuple:
result = (result,)
trigger.arguments.values.extend(list(
map(lambda v: serializer.dump(v, None, False), result)))
return trigger
def _exec_dag_function_normal(pusher_cache, kvs, triggers, function, schedule,
user_lib, cache):
fname = schedule.target_function
fargs = list(schedule.arguments[fname].values)
for trname in schedule.triggers:
trigger = triggers[trname]
fargs += list(trigger.arguments.values)
fargs = [serializer.load(arg) for arg in fargs]
result = _exec_func_normal(kvs, function, fargs, user_lib, cache)
is_sink = True
new_trigger = _construct_trigger(schedule.id, fname, result)
for conn in schedule.dag.connections:
if conn.source == fname:
is_sink = False
new_trigger.target_function = conn.sink
dest_ip = schedule.locations[conn.sink]
sckt = pusher_cache.get(sutils.get_dag_trigger_address(dest_ip))
sckt.send(new_trigger.SerializeToString())
if is_sink:
if schedule.response_address:
sckt = pusher_cache.get(schedule.response_address)
logging.info('DAG %s (ID %s) result returned to requester.' %
(schedule.dag.name, trigger.id))
sckt.send(serializer.dump(result))
else:
lattice = serializer.dump_lattice(result)
output_key = schedule.output_key if schedule.output_key \
else schedule.id
logging.info('DAG %s (ID %s) result in KVS at %s.' %
(schedule.dag.name, trigger.id, output_key))
kvs.put(output_key, lattice)
return is_sink
def _exec_dag_function_causal(pusher_cache, kvs, triggers, function, schedule,
user_lib):
fname = schedule.target_function
fargs = list(schedule.arguments[fname].values)
key_version_locations = {}
dependencies = {}
for trname in schedule.triggers:
trigger = triggers[trname]
fargs += list(trigger.arguments.values)
# Combine the locations of upstream cached key versions from all
# triggers.
for addr in trigger.version_locations:
if addr in key_version_locations:
key_version_locations[addr].extend(
trigger.version_locations[addr].key_versions)
else:
key_version_locations[addr] = list(
trigger.version_locations[addr])
# Combine the dependency sets from all triggers.
for dependency in trigger.dependencies:
vc = VectorClock(dict(dependency.vector_clock), True)
key = dependency.key
if key in dependencies:
dependencies[key].merge(vc)
else:
dependencies[key] = vc
fargs = [serializer.load(arg) for arg in fargs]
result = _exec_func_causal(kvs, function, fargs, user_lib, schedule,
key_version_locations, dependencies)
# Create a new trigger with the schedule ID and results of this execution.
new_trigger = _construct_trigger(schedule.id, fname, result)
# Serialize the key version location information into this new trigger.
for addr in key_version_locations:
new_trigger.version_locations[addr].keys.extend(
key_version_locations[addr])
# Serialize the set of dependency versions for causal metadata.
for key in dependencies:
dep = new_trigger.dependencies.add()
dep.key = key
dependencies[key].serialize(dep.vector_clock)
is_sink = True
for conn in schedule.dag.connections:
if conn.source == fname:
is_sink = False
new_trigger.target_function = conn.sink
dest_ip = schedule.locations[conn.sink]
sckt = pusher_cache.get(sutils.get_dag_trigger_address(dest_ip))
sckt.send(new_trigger.SerializeToString())
if is_sink:
logging.info('DAG %s (ID %s) completed in causal mode; result at %s.' %
(schedule.dag.name, schedule.id, schedule.output_key))
vector_clock = {}
okey = schedule.output_key
if okey in dependencies:
prev_count = 0
if schedule.client_id in dependencies[okey]:
prev_count = dependencies[okey][schedule.client_id]
dependencies[okey].update(schedule.client_id, prev_count + 1)
dependencies[okey].serialize(vector_clock)
del dependencies[okey]
else:
vector_clock = {schedule.client_id: 1}
# Serialize result into a MultiKeyCausalLattice.
vector_clock = VectorClock(vector_clock, True)
result = serializer.dump(result)
dependencies = MapLattice(dependencies)
lattice = MultiKeyCausalLattice(vector_clock, dependencies,
SetLattice({result}))
succeed = kvs.causal_put(schedule.output_key,
lattice, schedule.client_id)
while not succeed:
succeed = kvs.causal_put(schedule.output_key,
lattice, schedule.client_id)
# Issues requests to all upstream caches for this particular request
# and asks them to garbage collect pinned versions stored for the
# context of this request.
for cache_addr in key_version_locations:
gc_address = utils.get_cache_gc_address(cache_addr)
sckt = pusher_cache.get(gc_address)
sckt.send_string(schedule.client_id)
return is_sink
def _compute_children_read_set(schedule):
future_read_set = set()
fname = schedule.target_function
children = set()
delta = {fname}
while len(delta) > 0:
new_delta = set()
for conn in schedule.dag.connections:
if conn.source in delta:
children.add(conn.sink)
new_delta.add(conn.sink)
delta = new_delta
for child in children:
refs = list(filter(lambda arg: type(arg) == DropletReference,
[serializer.load(arg) for arg in
schedule.arguments[child].values]))
for ref in refs:
future_read_set.add(ref.key)
return future_read_set
| 36.27182 | 79 | 0.612994 |
4a1b291243be086e9c59052240b10109a46b89af
| 5,147 |
py
|
Python
|
configs/representation/uvc2_fpn/uvc2_r18-fpn_center_it_l1_video_2x8x1_sgd_cosine_10e_k400_rgb.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
configs/representation/uvc2_fpn/uvc2_r18-fpn_center_it_l1_video_2x8x1_sgd_cosine_10e_k400_rgb.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
configs/representation/uvc2_fpn/uvc2_r18-fpn_center_it_l1_video_2x8x1_sgd_cosine_10e_k400_rgb.py
|
happywu/mmaction2-CycleContrast
|
019734e471dffd1161b7a9c617ba862d2349a96c
|
[
"Apache-2.0"
] | null | null | null |
# model settings
temperature = 0.01
with_norm = True
model = dict(
type='UVCTrackerV2',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(1, 2, 3),
# strides=(1, 2, 1, 1),
norm_eval=False,
zero_init_residual=True),
neck=dict(
type='FPN',
in_channels=[128, 256, 512],
out_channels=256,
num_outs=4,
out_index=0),
cls_head=dict(
type='UVCHead',
loss_feat=None,
loss_aff=dict(
type='ConcentrateLoss',
win_len=8,
stride=8,
temperature=temperature,
with_norm=with_norm,
loss_weight=1.),
loss_bbox=dict(type='L1Loss', loss_weight=10.),
in_channels=256,
channels=128,
temperature=temperature,
with_norm=with_norm,
init_std=0.01,
track_type='center'))
# model training and testing settings
train_cfg = dict(
patch_size=96,
img_as_ref=True,
img_as_tar=False,
diff_crop=True,
skip_cycle=True,
center_ratio=0.)
test_cfg = dict(
precede_frames=7,
topk=5,
temperature=temperature,
# strides=(1, 2, 1, 1),
out_indices=(0, ),
neighbor_range=40,
with_norm=with_norm,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop', area_range=(0.2, 1.)),
dict(type='Resize', scale=(256, 256), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.1,
p=0.8),
dict(type='RandomGrayScale', p=0.2),
dict(type='RandomGaussianBlur', p=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=48,
workers_per_gpu=4,
val_workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=1e-1)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 10
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['uvc2-fpn'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 30.636905 | 78 | 0.620167 |
4a1b2aa0c4ecd20fa968d891f736ed65bfa701b3
| 3,132 |
py
|
Python
|
src/plugins/21_point/card_lib.py
|
NekoMashiro/water_spinach_bot
|
52b805169bbf49ff167ee7eaf6fa477f8b8d1c99
|
[
"MIT"
] | 1 |
2022-03-22T17:43:52.000Z
|
2022-03-22T17:43:52.000Z
|
src/plugins/21_point/card_lib.py
|
NekoMashiro/water_spinach_bot
|
52b805169bbf49ff167ee7eaf6fa477f8b8d1c99
|
[
"MIT"
] | null | null | null |
src/plugins/21_point/card_lib.py
|
NekoMashiro/water_spinach_bot
|
52b805169bbf49ff167ee7eaf6fa477f8b8d1c99
|
[
"MIT"
] | null | null | null |
total_card = []
player = 0
finished_player = 0
every_player_card = {}
every_player_point = {}
def card_game_start():
global total_card
global player
global finished_player
player = 0
finished_player = 0
total_card = random_card_list()
every_player_card.clear()
every_player_card['banker'] = []
every_player_card['banker'].append(total_card.pop())
every_player_card['banker'].append(total_card.pop())
first_card = card_to_string(every_player_card['banker'][0])
str = '菜菜的第一张牌是' + first_card + '哦 '
if(every_player_card['banker'][0][1] == 1):
str += '好像很危险耶Σ(゚∀゚ノ)ノ '
str += '至于第二张牌……可不能告诉你呢!'
return str
def add_player(player_id):
global player
player = player + 1
every_player_card[player_id] = []
every_player_card[player_id].append(total_card.pop())
every_player_card[player_id].append(total_card.pop())
str = '你的初始手牌是:'
for card in every_player_card[player_id]:
str += card_to_string(card) + ' '
str += calc_handcards_point(player_id)
return str
def stop_card(player_id):
global player
global finished_player
finished_player = finished_player + 1
if player == finished_player:
return True
return False
def ask_card(player_id):
every_player_card[player_id].append(total_card.pop())
handcards = every_player_card[player_id].copy()
str = '你的手牌是:'
for card in handcards:
str += card_to_string(card) + ' '
str += calc_handcards_point(player_id)
return str
def calc_handcards_point(player_id):
handcards = every_player_card[player_id]
point = 0
num_a = 0
for card in handcards:
if card[1] > 10:
point += 10
else:
point += card[1]
if card[1] == 1:
num_a = num_a + 1
while point <= 11 and num_a > 0:
point += 10
num_a -= 1
if point > 21:
every_player_point[player_id] = -1
return '总点数是……' + point.__str__() + '…………爆掉了(°ー°〃)'
if len(handcards) == 2 and point == 21:
every_player_point[player_id] = 22
return '是Black Jack诶Σ(゚∀゚ノ)ノ'
every_player_point[player_id] = point
str = '总点数是' + point.__str__() + '点哦'
if point > 14 and point < 21:
from random import randint
if randint(1, 10) + point <= 21:
str += ' 这不贪一手嘛_(•̀ω•́ 」∠)_'
return str
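# Worked example (hand made up): a two-card hand [(1, 1), (2, 7)] -- spade A plus
# heart 7 -- sums to 8 with one ace, and the while-loop upgrades the ace to 11
# because 8 <= 11, so the stored point total is 18.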
def card_to_string(card: tuple):
str = ''
if card[0] == 1:
str += '♠'
elif card[0] == 2:
str += '♥'
elif card[0] == 3:
str += '♣'
elif card[0] == 4:
str += '♦'
if card[1] == 1:
str += 'A'
elif card[1] == 11:
str += 'J'
elif card[1] == 12:
str += 'Q'
elif card[1] == 13:
str += 'K'
else:
str += card[1].__str__()
return str
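# Examples derived from the mapping above: card_to_string((1, 1)) -> '♠A',
# card_to_string((4, 13)) -> '♦K', card_to_string((3, 10)) -> '♣10'.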
def random_card_list():
l = []
total = 0
for i in range(1, 5):
for j in range(1, 14):
from random import randint
pos = randint(0, total)
total = total + 1
l.insert(pos, (i, j))
return l
def final_calc():
pass
| 23.373134 | 63 | 0.574074 |
4a1b2b1013a8d5179765cd8d0f0ce23fc70cb883
| 8,281 |
py
|
Python
|
sdk/python/pulumi_azure/compute/availability_set.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/compute/availability_set.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/compute/availability_set.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class AvailabilitySet(pulumi.CustomResource):
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
managed: pulumi.Output[bool]
"""
Specifies whether the availability set is managed or not. Possible values are `true` (to specify aligned) or `false` (to specify classic). Default is `true`.
"""
name: pulumi.Output[str]
"""
Specifies the name of the availability set. Changing this forces a new resource to be created.
"""
platform_fault_domain_count: pulumi.Output[float]
"""
Specifies the number of fault domains that are used. Defaults to `3`.
"""
platform_update_domain_count: pulumi.Output[float]
"""
Specifies the number of update domains that are used. Defaults to `5`.
"""
proximity_placement_group_id: pulumi.Output[str]
"""
The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the availability set. Changing this forces a new resource to be created.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, location=None, managed=None, name=None, platform_fault_domain_count=None, platform_update_domain_count=None, proximity_placement_group_id=None, resource_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages an Availability Set for Virtual Machines.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_availability_set = azure.compute.AvailabilitySet("exampleAvailabilitySet",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
tags={
"environment": "Production",
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[bool] managed: Specifies whether the availability set is managed or not. Possible values are `true` (to specify aligned) or `false` (to specify classic). Default is `true`.
:param pulumi.Input[str] name: Specifies the name of the availability set. Changing this forces a new resource to be created.
:param pulumi.Input[float] platform_fault_domain_count: Specifies the number of fault domains that are used. Defaults to `3`.
:param pulumi.Input[float] platform_update_domain_count: Specifies the number of update domains that are used. Defaults to `5`.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the availability set. Changing this forces a new resource to be created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['location'] = location
__props__['managed'] = managed
__props__['name'] = name
__props__['platform_fault_domain_count'] = platform_fault_domain_count
__props__['platform_update_domain_count'] = platform_update_domain_count
__props__['proximity_placement_group_id'] = proximity_placement_group_id
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
super(AvailabilitySet, __self__).__init__(
'azure:compute/availabilitySet:AvailabilitySet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, location=None, managed=None, name=None, platform_fault_domain_count=None, platform_update_domain_count=None, proximity_placement_group_id=None, resource_group_name=None, tags=None):
"""
Get an existing AvailabilitySet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[bool] managed: Specifies whether the availability set is managed or not. Possible values are `true` (to specify aligned) or `false` (to specify classic). Default is `true`.
:param pulumi.Input[str] name: Specifies the name of the availability set. Changing this forces a new resource to be created.
:param pulumi.Input[float] platform_fault_domain_count: Specifies the number of fault domains that are used. Defaults to `3`.
:param pulumi.Input[float] platform_update_domain_count: Specifies the number of update domains that are used. Defaults to `5`.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the availability set. Changing this forces a new resource to be created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["location"] = location
__props__["managed"] = managed
__props__["name"] = name
__props__["platform_fault_domain_count"] = platform_fault_domain_count
__props__["platform_update_domain_count"] = platform_update_domain_count
__props__["proximity_placement_group_id"] = proximity_placement_group_id
__props__["resource_group_name"] = resource_group_name
__props__["tags"] = tags
return AvailabilitySet(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 55.952703 | 280 | 0.709455 |
4a1b2baa9af6e1327dab212033d717281816cf22
| 3,853 |
py
|
Python
|
tests/e2e/test_navigation_page_non_student.py
|
jaredsexton/Submitty
|
bcc778514299bfabf574131f5eacce3732c695e1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/e2e/test_navigation_page_non_student.py
|
jaredsexton/Submitty
|
bcc778514299bfabf574131f5eacce3732c695e1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/e2e/test_navigation_page_non_student.py
|
jaredsexton/Submitty
|
bcc778514299bfabf574131f5eacce3732c695e1
|
[
"BSD-3-Clause"
] | null | null | null |
from e2e.base_testcase import BaseTestCase
class TestNavigationPageNonStudent(BaseTestCase):
def __init__(self, testname):
super().__init__(testname, log_in=False)
def test_instructor(self):
self.log_in(user_id="instructor", user_name="Quinn")
self.click_class('sample', 'SAMPLE')
elements = self.driver.find_elements_by_class_name('nav-title-row')
self.assertEqual(6, len(elements))
self.assertEqual("future", elements[0].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element_by_id('future_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("beta", elements[1].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element_by_id('beta_tbody')
.find_elements_by_class_name('gradeable_row')))
self.assertEqual("open", elements[2].get_attribute('id'))
self.assertEqual(2, len(self.driver
.find_element_by_id('open_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("closed", elements[3].get_attribute('id'))
self.assertEqual(1, len(self.driver
.find_element_by_id('closed_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("items_being_graded", elements[4].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element_by_id('items_being_graded_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("graded", elements[5].get_attribute('id'))
self.assertEqual(8, len(self.driver
.find_element_by_id('graded_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual(6, len(self.driver.find_element_by_class_name(
'gradeable_row').find_elements_by_tag_name('td')))
def test_ta(self):
self.log_in(user_id="ta", user_name="Jill")
self.click_class('sample', 'SAMPLE')
elements = self.driver.find_elements_by_class_name('nav-title-row')
self.assertEqual(5, len(elements))
self.assertEqual("beta", elements[0].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element_by_id('beta_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("open", elements[1].get_attribute('id'))
self.assertEqual(2, len(self.driver
.find_element_by_id('open_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("closed", elements[2].get_attribute('id'))
self.assertEqual(1, len(self.driver
.find_element_by_id('closed_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("items_being_graded", elements[3].get_attribute('id'))
self.assertEqual(3, len(self.driver
.find_element_by_id('items_being_graded_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual("graded", elements[4].get_attribute('id'))
self.assertEqual(8, len(self.driver
.find_element_by_id('graded_tbody')
.find_elements_by_class_name("gradeable_row")))
self.assertEqual(6, len(self.driver.find_element_by_class_name(
'gradeable_row').find_elements_by_tag_name('td')))
if __name__ == "__main__":
import unittest
unittest.main()
| 52.780822 | 80 | 0.60135 |
4a1b2c2c57b9b53e7008f60e376ed021d3bc236b
| 4,478 |
py
|
Python
|
src/syntren.py
|
compmedlab/gGAN
|
e157bd76438ef6632ed59ea95677cd41136f84d1
|
[
"MIT"
] | null | null | null |
src/syntren.py
|
compmedlab/gGAN
|
e157bd76438ef6632ed59ea95677cd41136f84d1
|
[
"MIT"
] | null | null | null |
src/syntren.py
|
compmedlab/gGAN
|
e157bd76438ef6632ed59ea95677cd41136f84d1
|
[
"MIT"
] | 1 |
2020-12-15T16:37:07.000Z
|
2020-12-15T16:37:07.000Z
|
from data_pipeline import reg_network, get_gene_symbols
import numpy as np
import pandas as pd
DEFAULT_ROOT_GENE = 'CRP'
DEFAULT_EVIDENCE = 'Weak'
DEFAULT_DEPTH = np.inf
SYNTREN_DATA_DIR = '../data/syntren/' # '../../syntren1.2release/data/'
NETWORK_FILE = SYNTREN_DATA_DIR + 'source_networks/EColi_n{}_r{}_e{}_d{}.sif'
RESULTS_FILE = SYNTREN_DATA_DIR + 'results/nn{}_nbgr{}_hop{}_bionoise{}_expnoise{}_corrnoise{' \
'}_neighAdd_{}_dataset.txt'
DEFAULT_BACKGROUND_NODES = 0
DEFAULT_HOP = 0.3 # Probability for complex 2-regulator interactions
DEFAULT_BIONOISE = 0.1
DEFAULT_EXPNOISE = 0.1
DEFAULT_CORRNOISE = 0.1
def dump_network(root_gene=DEFAULT_ROOT_GENE, minimum_evidence=DEFAULT_EVIDENCE, depth=DEFAULT_DEPTH, break_loops=True):
"""
Writes network for root_gene in SyNTReN format (directory: SYNTREN_DATA_DIR)
:param root_gene: Gene on top of the hierarchy
:param minimum_evidence: Interactions with a strength below this level will be discarded.
Possible values: 'confirmed', 'strong', 'weak'
:param depth: Ignores genes that are not in the first max_depth levels of the hierarchy
:param break_loops: Whether to break the loops from lower (or equal) to upper levels in the hierarchy.
If True, the resulting network is a Directed Acyclic Graph (DAG).
"""
gs = get_gene_symbols()
nodes, edges = reg_network(gs, root_gene, minimum_evidence, depth, break_loops)
nb_nodes = len(nodes)
file_name = NETWORK_FILE.format(nb_nodes, root_gene, minimum_evidence, depth)
with open(file_name, 'w') as f:
for tf, tg_dict in edges.items():
for tg, reg_type in tg_dict.items():
s_reg_type = 'ac'
if reg_type == '-':
s_reg_type = 're'
elif reg_type == '+-':
s_reg_type = 'du'
elif reg_type == '?':
s_reg_type = 'du'
print('Warning: Unknown regulation type found for interaction {}->{}'.format(tf, tg))
elif reg_type != '+':
raise ValueError('Invalid regulation type')
line = '{}\t{}\t{}\n'.format(tf, s_reg_type, tg)
f.write(line)
print('Finished preparing SynTReN network\nFile:{}'.format(file_name))
def syntren_results(root_gene=DEFAULT_ROOT_GENE, minimum_evidence=DEFAULT_EVIDENCE, depth=DEFAULT_DEPTH,
break_loops=True, nb_background=DEFAULT_BACKGROUND_NODES, hop=DEFAULT_HOP,
bionoise=DEFAULT_BIONOISE, expnoise=DEFAULT_EXPNOISE, corrnoise=DEFAULT_CORRNOISE,
normalized=True):
"""
Reads SynTReN results
:param root_gene: Gene on top of the hierarchy
:param minimum_evidence: Interactions with a strength below this level will be discarded.
Possible values: 'confirmed', 'strong', 'weak'
    :param depth: Ignores genes that are not in the first depth levels of the hierarchy
:param break_loops: Whether to break the loops from lower (or equal) to upper levels in the hierarchy.
If True, the resulting network is a Directed Acyclic Graph (DAG).
:param nb_background: Number of background nodes
:param hop: Probability for complex 2-regulator interactions
:param bionoise: Biological noise [0, 1]
:param expnoise: Experimental noise [0, 1]
:param corrnoise: Noise on correlated inputs [0, 1]
:param normalized: Whether to get SynTReN normalized or unnormalized data
:return: expression matrix with Shape=(nb_samples, nb_genes), and list of gene symbols
"""
# Read results
gs = get_gene_symbols()
nodes, edges = reg_network(gs, root_gene, minimum_evidence, depth, break_loops)
nb_nodes = len(nodes)
norm = 'maxExpr1'
if not normalized:
norm = 'unnormalized'
file_name = RESULTS_FILE.format(nb_nodes, nb_background, hop, bionoise, expnoise, corrnoise, norm)
df = pd.read_csv(file_name, delimiter='\t')
# Get data and discard background genes
symbols = df['GENE'].values
gene_symbols, backgr_symbols = symbols[:nb_nodes], symbols[nb_nodes:]
expr = df.loc[:, df.columns != 'GENE'].values.T
expr, expr_background = expr[:, :nb_nodes], expr[:, nb_nodes:]
return expr, list(gene_symbols)
if __name__ == '__main__':
dump_network(minimum_evidence='Weak')
expr, gene_symbols = syntren_results(minimum_evidence='Weak', nb_background=0)
| 48.150538 | 120 | 0.678205 |
4a1b2c8f1ecdcd2ed1bf83d185069db29b5692fb
| 649 |
py
|
Python
|
remoteweb/cli/keygen.py
|
hinihat/remoteweb
|
ae564a016e0bfa6c371385c53138652256c57bca
|
[
"MIT"
] | null | null | null |
remoteweb/cli/keygen.py
|
hinihat/remoteweb
|
ae564a016e0bfa6c371385c53138652256c57bca
|
[
"MIT"
] | null | null | null |
remoteweb/cli/keygen.py
|
hinihat/remoteweb
|
ae564a016e0bfa6c371385c53138652256c57bca
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022 Shuhei Nitta. All rights reserved.
import click
from remoteweb import crypto
from remoteweb.cli import root
@root.main.command(
name="keygen",
help="Generate a pair of key"
)
@click.option(
"--bits", "-b",
type=int,
default=2048,
help="Bits of the key",
show_default=True
)
@click.option(
"--out", "-o",
type=click.types.Path(),
default="./remoteweb_key",
help="Output path of the key",
show_default=True
)
def generate_key(
out: str,
bits: int
) -> None:
crypto.create_key(
bits=bits,
privatekey_path=out,
publickey_path=out + ".pub"
)
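# Hypothetical invocation sketch (it assumes the package exposes this Click group
# as a `remoteweb` console script; the entry-point name is not shown in this file):
#
#     remoteweb keygen --bits 4096 --out ./my_key
#
# which would write ./my_key and ./my_key.pub through crypto.create_key().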
| 18.542857 | 55 | 0.619414 |
4a1b2cf382dea4aaec1c8644bfeffd058b478e8a
| 114 |
py
|
Python
|
keras_retinanet/layers/__init__.py
|
iver56/keras-retinanet
|
83feca1aa49a8a75ed5d4a2ab43d8c18c6cce3f7
|
[
"Apache-2.0"
] | 3 |
2020-08-28T11:25:41.000Z
|
2022-01-16T08:09:15.000Z
|
keras_retinanet/layers/__init__.py
|
iver56/keras-retinanet
|
83feca1aa49a8a75ed5d4a2ab43d8c18c6cce3f7
|
[
"Apache-2.0"
] | 11 |
2020-01-28T22:29:48.000Z
|
2022-03-11T23:29:39.000Z
|
keras_retinanet/layers/__init__.py
|
iver56/keras-retinanet
|
83feca1aa49a8a75ed5d4a2ab43d8c18c6cce3f7
|
[
"Apache-2.0"
] | 2 |
2021-12-03T17:23:21.000Z
|
2021-12-04T11:50:12.000Z
|
from ._misc import RegressBoxes, UpsampleLike, Anchors, ClipBoxes
from .filter_detections import FilterDetections
| 38 | 65 | 0.859649 |
4a1b2d0c5b5ab32223ba87617f6b3acbd9f6d500
| 8,365 |
py
|
Python
|
BSMDEMeasuresEstimation/MeasuresCode/CycleFailure/readlinks.py
|
OSADP/CVD-DME
|
84e40c7c0105d90d9cf6392140101e6d5b88911e
|
[
"Apache-2.0"
] | null | null | null |
BSMDEMeasuresEstimation/MeasuresCode/CycleFailure/readlinks.py
|
OSADP/CVD-DME
|
84e40c7c0105d90d9cf6392140101e6d5b88911e
|
[
"Apache-2.0"
] | null | null | null |
BSMDEMeasuresEstimation/MeasuresCode/CycleFailure/readlinks.py
|
OSADP/CVD-DME
|
84e40c7c0105d90d9cf6392140101e6d5b88911e
|
[
"Apache-2.0"
] | null | null | null |
def all_links(bnlinks):
all_key_links = []
for bottleneck in bnlinks.keys():
for i, stem in enumerate(bnlinks[bottleneck].keys()):
for lnk in bnlinks[bottleneck][stem]:
all_key_links.append(lnk[0])
return all_key_links
def read_link_file(filename):
bnlinks = {}
for line in open (filename):
row = line.strip().split(',')
if row[0] not in bnlinks.keys():
bnlinks[row[0]] = {}
if row[1] not in bnlinks[row[0]].keys():
bnlinks[row[0]][row[1]] = []
for col in range(2,len(row),3):
if row[col] == '':
break
bnlinks[row[0]][row[1]].append( (int(row[col]), int(row[col+1]), float(row[col+2])) )
keylinks = all_links(bnlinks)
return bnlinks, keylinks
def read_intersection_file(filename):
inlinks = {}
for line in open (filename):
row = line.split(',')
if str(row[1]).strip() not in inlinks.keys():
inlinks[str(row[1]).strip()] = str(row[0])
return inlinks
def read_intersection_cycle_file(filename):
cycles = {}
for line in open(filename):
row = line.split(',')
cycles[row[0]] = []
for time in row[1:]:
cycles[row[0]].append(float(time))
return cycles
def read_greentimes_file(lsa_file):
green_times = {}
red_times = {}
#green_times[controller_num][signal_group] = [list of greentimes]
#FROM VISSIM
# for lsa_line in open(lsa_file):
# lsa_row = lsa_line.split(';')
# if lsa_row[4].strip() == 'green':
# if lsa_row[2].strip() not in green_times.keys():
# green_times[lsa_row[2].strip()] = {}
# if lsa_row[3].strip() not in green_times[lsa_row[2].strip()].keys():
# green_times[lsa_row[2].strip()][lsa_row[3].strip()] = []
# green_times[lsa_row[2].strip()][lsa_row[3].strip()].append(float(lsa_row[0].strip()))
# elif lsa_row[4].strip() == 'red':
# if lsa_row[2].strip() not in green_times.keys():
# continue
# if lsa_row[3].strip() not in green_times[lsa_row[2].strip()].keys():
# continue
# if lsa_row[2].strip() not in red_times.keys():
# red_times[lsa_row[2].strip()] = {}
# if lsa_row[3].strip() not in red_times[lsa_row[2].strip()].keys():
# red_times[lsa_row[2].strip()][lsa_row[3].strip()] = []
# red_times[lsa_row[2].strip()][lsa_row[3].strip()].append(float(lsa_row[0].strip()))
for lsa_line in open(lsa_file):
lsa_row = lsa_line.split(',')
if lsa_row[2].strip() == 'green':
if lsa_row[0].strip() not in green_times.keys():
green_times[lsa_row[0].strip()] = {}
if lsa_row[1].strip() not in green_times[lsa_row[0].strip()].keys():
green_times[lsa_row[0].strip()][lsa_row[1].strip()] = []
green_times[lsa_row[0].strip()][lsa_row[1].strip()].append(float(lsa_row[3].strip()))
elif lsa_row[2].strip() == 'red':
if lsa_row[0].strip() not in green_times.keys():
continue
if lsa_row[1].strip() not in green_times[lsa_row[0].strip()].keys():
continue
if lsa_row[0].strip() not in red_times.keys():
red_times[lsa_row[0].strip()] = {}
if lsa_row[1].strip() not in red_times[lsa_row[0].strip()].keys():
red_times[lsa_row[0].strip()][lsa_row[1].strip()] = []
red_times[lsa_row[0].strip()][lsa_row[1].strip()].append(float(lsa_row[3].strip()))
return green_times, red_times
# Read in a csv with the following data in order: intersection name, stem, controller number, signal group number
def read_signal_controllers_file(filename):
intersections = {}
#intersections[name][stem]={controllernum,signalgroup,prev ids, current greentime, current ids}
for line in open(filename):
row = line.split(',')
if row[0] not in intersections.keys():
intersections[row[0]] = {}
if row[1] not in intersections[row[0]].keys():
intersections[row[0]][row[1]] = {'controller_num': row[2],
'signal_group': row[3],
'prev_greentime_ids':[],
'current_greentime':'1.0',
'current_greentime_ids':[],
}
return intersections
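# Illustrative sketch (intersection name, stem and numbers are made up): a CSV
# line such as "VanNess_Market,NB,3,2" registers controller number "3" and signal
# group "2" for stem "NB" of intersection "VanNess_Market", together with the
# empty green-time bookkeeping fields initialised above. Note that the line is
# split without strip(), so the last column normally keeps its trailing newline.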
def read_routes(filename):
routes = {}
for line in open(filename):
row = line.strip().split(',')
route_group = row[0]
route = row[1]
route_origin = float(row[2])
route_destination = float(row[3])
x = float(row[4]) * (100 / 2.54 / 12)
y = float(row[5]) * (100 / 2.54 / 12)
if route_group not in routes.keys():
routes[route_group] = {}
if route not in routes[route_group].keys():
routes[route_group][route] = {'route_origin': route_origin,
'route_destination': route_destination,
'x': x,
'y': y,
}
return routes
def read_traveltimes_file(filename):
travel_times = {}
for line in open(filename):
row = line.strip().split(',')
route_group = row[0]
route_num = row[1]
minute = row[2]
avg_ttime = row[3]
if route_group not in travel_times:
travel_times[route_group] = {}
if route_num not in travel_times[route_group]:
travel_times[route_group][route_num] = {}
if minute not in travel_times[route_group][route_num]:
travel_times[route_group][route_num][minute] = float(avg_ttime)
else:
print 'Error'
print route_group
print route_num
print minute
return travel_times
def read_speed_limit(filename):
speed_limits = {}
for line in open(filename):
row = line.strip().split(',')
link = float(row[0])
speed = float(row[1])
speed_limits[link] = speed
return speed_limits
def read_link_length(filename):
link_lengths = {}
for line in open(filename):
row = line.strip().split(',')
link = float(row[0])
length = float(row[1])
link_lengths[link] = length
return link_lengths
def read_full_routes(filename):
routes = {}
for line in open(filename):
row = line.strip().split(',')
route_group = row[0]
route = row[1]
if route_group not in routes.keys():
routes[route_group] = {}
if route not in routes[route_group].keys():
routes[route_group][route] = []
for col in range(2,len(row)):
if row[col] == '':
break
routes[route_group][route].append(float(row[col]))
return routes
def read_endpoints(filename):
endpoints = {}
for line in open(filename):
row = line.strip().split(',')
link = float(row[0])
if link not in endpoints.keys():
endpoints[link] = {}
endpoints[link]["x"] = float(row[1]) * (100 / 2.54 / 12)
endpoints[link]["y"] = float(row[2]) * (100 / 2.54 / 12)
return endpoints
###########################################
# file = r'D:\Data\Tasks\FHWA\Current\DCM_Contract\BSM Emulator\GT_Coding\superlinks_list_VanNess.csv'
#
# read_link_file(file)
#
# super_links, all_key_links = read_link_file(file)
#
# print all_key_links
# print super_links
# file = r'C:\Users\M29565\Documents\Projects\tca\GT_code\CycleFailure\superlinks_list_VanNess_intersections.csv'
# int_links = read_intersection_file(file)
# print int_links
# intersection_map = {}
# for int_name in int_links.values():
# if int_name not in intersection_map.keys():
# intersection_map[int_name] = {'prev_cycle_ids':[1,2],
# 'current_cycle':0.0,
# 'cycle_ids':[]
# }
# for int_name in int_links.values():
# intersection_map[int_name]['prev_cycle_ids'].extend([2,3])
# print intersection_map
# file = r'C:\Users\M29565\Documents\Projects\tca\GT_code\CycleFailure\intersection_cycle_times.csv'
# cycles = read_intersection_cycle_file(file)
# print cycles
# file = r'C:\Users\M29565\Documents\Projects\tca\GT_code\CycleFailure\medDemand_test.lsa'
# green_times = read_greentimes_file(file)
# print green_times
# file = r'C:\Users\M29565\Documents\Projects\tca\GT_code\CycleFailure\vanness_greentimes.csv'
# cycles = read_cycle_file(file)
# print cycles
| 30.867159 | 113 | 0.593425 |