| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-14 17:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("election_snooper", "0002_auto_20170314_1754")]
operations = [
migrations.AddField(
model_name="snoopedelection",
name="extra",
field=models.TextField(blank=True),
)
]
| DemocracyClub/EveryElection | every_election/apps/election_snooper/migrations/0003_snoopedelection_extra.py | Python | bsd-3-clause | 447 | 0 |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import contextlib
import errno
import os
import random
import signal
import socket
import subprocess
import sys
import time
import urllib2
try:
xrange
except NameError:
xrange = range
if __name__ == "__main__":
tornado_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../..'))
# dev_appserver doesn't seem to set SO_REUSEADDR
port = random.randrange(10000, 11000)
# does dev_appserver.py ever live anywhere but /usr/local/bin?
proc = subprocess.Popen([sys.executable,
"/usr/local/bin/dev_appserver.py",
os.path.dirname(os.path.abspath(__file__)),
"--port=%d" % port,
"--skip_sdk_update_check",
],
cwd=tornado_root)
try:
for i in xrange(50):
with contextlib.closing(socket.socket()) as sock:
err = sock.connect_ex(('localhost', port))
if err == 0:
break
elif err != errno.ECONNREFUSED:
raise Exception("Got unexpected socket error %d" % err)
time.sleep(0.1)
else:
raise Exception("Server didn't start listening")
resp = urllib2.urlopen("http://localhost:%d/" % port)
print(resp.read())
finally:
# dev_appserver sometimes ignores SIGTERM (especially on 2.5),
# so try a few times to kill it.
for sig in [signal.SIGTERM, signal.SIGTERM, signal.SIGKILL]:
os.kill(proc.pid, sig)
res = os.waitpid(proc.pid, os.WNOHANG)
if res != (0, 0):
break
time.sleep(0.1)
else:
os.waitpid(proc.pid, 0)
| Lancher/tornado | maint/test/appengine/common/runtests.py | Python | apache-2.0 | 1,918 | 0 |
# -*- coding: utf-8 -*-
# Imports required by the collector below (the code is Python 2: urllib2 and raw_input are used).
import os
import sys
import time
import random
import logging
import datetime
import urllib
import urllib2
from lxml import etree
class CollectData():
"""每小时数据收集类
利用微博高级搜索功能,按关键字搜集一定时间范围内的微博。
大体思路:构造URL,爬取网页,然后解析网页中的微博ID。后续利用微博API进行数据入库。本程序只负责收集微博的ID。
登陆新浪微博,进入高级搜索,输入关键字”空气污染“,选择”实时“,时间为”2013-07-02-2:2013-07-09-2“,地区为”北京“,之后发送请求会发现地址栏变为如下:
http://s.weibo.com/wb/%25E7%25A9%25BA%25E6%25B0%2594%25E6%25B1%25A1%25E6%259F%2593&xsort=time®ion=custom:11:1000×cope=custom:2013-07-02-2:2013-07-09-2&Refer=g
固定地址部分:http://s.weibo.com/wb/
关键字二次UTF-8编码:%25E7%25A9%25BA%25E6%25B0%2594%25E6%25B1%25A1%25E6%259F%2593
排序为“实时”:xsort=time
搜索地区:region=custom:11:1000
搜索时间范围:timescope=custom:2013-07-02-2:2013-07-09-2
可忽略项:Refer=g
显示类似微博:nodup=1 注:这个选项可多收集微博,建议加上。默认不加此参数,省略了部分相似微博。
某次请求的页数:page=1
另外,高级搜索最多返回50页微博,那么时间间隔设置最小为宜。所以该类设置为搜集一定时间段内最多50页微博。
"""
def __init__(self, keyword, startTime, region, savedir, interval='50', flag=True, begin_url_per = "http://s.weibo.com/weibo/"):
self.begin_url_per = begin_url_per #fixed URL prefix; defaults to "http://s.weibo.com/weibo/", alternatively "http://s.weibo.com/wb/"
self.setKeyword(keyword) #set the keyword
self.setStartTimescope(startTime) #set the search start time
self.setRegion(region) #set the search region
self.setSave_dir(savedir) #set the directory the results are stored in
self.setInterval(interval) #set the base interval between consecutive page requests (too frequent and you are flagged as a bot)
self.setFlag(flag) #set the bot-detection flag
self.logger = logging.getLogger('main.CollectData') #initialize the logger
##Set the keyword
##The keyword has to be decoded first
def setKeyword(self, keyword):
self.keyword = keyword.decode('GBK').encode("utf-8")
print 'twice encode:',self.getKeyWord()
##Set the start of the time range; the step is one hour
##Format: yyyy-mm-dd-HH
def setStartTimescope(self, startTime):
if not (startTime == '-'):
self.timescope = startTime + ":" + startTime
else:
self.timescope = '-'
##Set the search region
def setRegion(self, region):
self.region = region
##Set the directory the results are stored in
def setSave_dir(self, save_dir):
self.save_dir = save_dir
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
##Set the base interval between consecutive page requests
def setInterval(self, interval):
self.interval = int(interval)
##Flag for whether we have been treated as a bot. If False, you have to open the page and enter the captcha by hand
def setFlag(self, flag):
self.flag = flag
##Build the search URL
def getURL(self):
return self.begin_url_per+self.getKeyWord()+"&region=custom:"+self.region+"&xsort=time&timescope=custom:"+self.timescope+"&nodup=1&page="
##The keyword has to be urlencoded twice
def getKeyWord(self):
once = urllib.urlencode({"kw":self.keyword})[3:]
return urllib.urlencode({"kw":once})[3:]
##Crawl all pages of one request; at most 50 pages are returned
def download(self, url, maxTryNum=4):
content = open(self.save_dir + os.sep + "weibo_ids.txt", "ab") #result file the weibo IDs are appended to
hasMore = True #a request may return fewer than 50 pages; flag for whether there is another page
isCaught = False #flag for whether this request was treated as a bot; once caught, copy the URL from the log, open the page and enter the captcha
mid_filter = set([]) #used to drop duplicate weibo IDs
i = 1 #number of pages returned by this request so far
while hasMore and i < 51 and (not isCaught): #at most 50 pages are returned; parse every page and write it to the result file
source_url = url + str(i) #URL of this page
data = '' #raw HTML of this page
goon = True #network-failure flag
##If the network is flaky, retry the request a few times
for tryNum in range(maxTryNum):
try:
html = urllib2.urlopen(source_url, timeout=12)
data = html.read()
break
except:
if tryNum < (maxTryNum-1):
time.sleep(10)
else:
print 'Internet Connect Error!'
self.logger.error('Internet Connect Error!')
self.logger.info('filePath: ' + self.save_dir)
self.logger.info('url: ' + source_url)
self.logger.info('page: ' + str(i))
self.flag = False
goon = False
break
if goon:
lines = data.splitlines()
isCaught = True
for line in lines:
## Check whether the page contains weibo content; if this line is present we were not treated as a bot
if line.startswith('<script>STK && STK.pageletM && STK.pageletM.view({"pid":"pl_weibo_direct"'):
isCaught = False
n = line.find('html":"')
if n > 0:
j = line[n + 7: -12].encode("utf-8").decode('unicode_escape').encode("utf-8").replace("\\", "")
## Page with no more results
if (j.find('<div class="search_noresult">') > 0):
hasMore = False
## Page with results
else:
page = etree.HTML(j)
dls = page.xpath(u"//dl") #parse with XPath
for dl in dls:
mid = str(dl.attrib.get('mid'))
if(mid != 'None' and mid not in mid_filter):
mid_filter.add(mid)
content.write(mid)
content.write('\n')
break
lines = None
## Handle the case where we were treated as a bot
if isCaught:
print 'Be Caught!'
self.logger.error('Be Caught Error!')
self.logger.info('filePath: ' + self.save_dir)
self.logger.info('url: ' + source_url)
self.logger.info('page:' + str(i))
data = None
self.flag = False
break
## No more results; finish this request and move on to the next one
if not hasMore:
print 'No More Results!'
if i == 1:
time.sleep(random.randint(55,75))
else:
time.sleep(15)
data = None
break
i += 1
## Random sleep between two consecutive URL requests; there is no simulated login yet, so keep it polite
sleeptime_one = random.randint(self.interval-30,self.interval-10)
sleeptime_two = random.randint(self.interval+10,self.interval+30)
if i%2 == 0:
sleeptime = sleeptime_two
else:
sleeptime = sleeptime_one
print 'sleeping ' + str(sleeptime) + ' seconds...'
time.sleep(sleeptime)
else:
break
content.close()
content = None
##Shift the search time window forward; this helps collect as much data as possible
def getTimescope(self, perTimescope, hours):
if not (perTimescope=='-'):
times_list = perTimescope.split(':')
start_datetime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(times_list[-1],"%Y-%m-%d-%H")))
start_new_datetime = start_datetime + datetime.timedelta(seconds = 3600)
end_new_datetime = start_new_datetime + datetime.timedelta(seconds = 3600*(hours-1))
start_str = start_new_datetime.strftime("%Y-%m-%d-%H")
end_str = end_new_datetime.strftime("%Y-%m-%d-%H")
return start_str + ":" + end_str
else:
return '-'
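## Example: getTimescope('2013-07-02-2:2013-07-02-2', 1) returns '2013-07-02-3:2013-07-02-3',
## i.e. the one-hour search window is shifted forward by one hour.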
def main():
logger = logging.getLogger('main')
logFile = './collect.log'
logger.setLevel(logging.DEBUG)
filehandler = logging.FileHandler(logFile)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s')
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
while True:
## Read keyboard input
keyword = raw_input('Enter the keyword(type \'quit\' to exit ):')
if keyword == 'quit':
sys.exit()
startTime = raw_input('Enter the start time(Format:YYYY-mm-dd-HH):')
region = raw_input('Enter the region([BJ]11:1000,[SH]31:1000,[GZ]44:1,[CD]51:1):')
savedir = raw_input('Enter the save directory(Like C://data//):')
interval = raw_input('Enter the time interval( >30 and default:50):')
##Instantiate the collector for the given keyword and start time
cd = CollectData(keyword, startTime, region, savedir, interval)
while cd.flag:
print cd.timescope
logger.info(cd.timescope)
url = cd.getURL()
cd.download(url)
cd.timescope = cd.getTimescope(cd.timescope,1) #advance the search window to the next hour
else:
cd = None
print '-----------------------------------------------------'
print '-----------------------------------------------------'
else:
logger.removeHandler(filehandler)
logger = None
if __name__ == '__main__':
main()
| osDanielLee/SelfThinkingRobot | AchieveData/CollectData.py | Python | bsd-3-clause | 8,610 | 0.038329 |
# -*- coding: utf-8 -*-
"""
Tests for WFS-T provider using QGIS Server through qgis_wrapped_server.py.
This is an integration test for the QGIS Desktop WFS-T provider and QGIS Server
WFS-T that checks whether QGIS can talk to and understand itself.
The test uses testdata/wfs_transactional/wfs_transactional.qgs and three
initially empty shapefile layers with points, lines and polygons.
All WFS-T calls are executed through the QGIS WFS data provider.
The three layers are
1. populated with WFS-T
2. checked for geometry and attributes
3. modified with WFS-T
4. checked for geometry and attributes
5. emptied with WFS-T calls to delete
From build dir, run: ctest -R PyQgsServerWFST -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '05/15/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import re
import subprocess
from shutil import copytree, rmtree
import tempfile
from utilities import unitTestDataPath, waitServer
from qgis.core import (
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsPoint,
QgsRectangle,
QgsFeatureRequest,
QgsExpression,
)
from qgis.testing import (
start_app,
unittest,
)
try:
QGIS_SERVER_WFST_PORT = os.environ['QGIS_SERVER_WFST_PORT']
except:
QGIS_SERVER_WFST_PORT = '0' # Auto
qgis_app = start_app()
class TestWFST(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.port = QGIS_SERVER_WFST_PORT
# Create tmp folder
cls.temp_path = tempfile.mkdtemp()
cls.testdata_path = cls.temp_path + '/' + 'wfs_transactional' + '/'
copytree(unitTestDataPath('wfs_transactional') + '/',
cls.temp_path + '/' + 'wfs_transactional')
cls.project_path = cls.temp_path + '/' + 'wfs_transactional' + '/' + \
'wfs_transactional.qgs'
assert os.path.exists(cls.project_path), "Project not found: %s" % \
cls.project_path
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
# Clear all test layers
for ln in ['test_point', 'test_polygon', 'test_linestring']:
cls._clearLayer(ln)
os.environ['QGIS_SERVER_PORT'] = str(cls.port)
server_path = os.path.dirname(os.path.realpath(__file__)) + \
'/qgis_wrapped_server.py'
cls.server = subprocess.Popen([sys.executable, server_path],
env=os.environ, stdout=subprocess.PIPE)
line = cls.server.stdout.readline()
cls.port = int(re.findall(b':(\d+)', line)[0])
assert cls.port != 0
# Wait for the server process to start
assert waitServer('http://127.0.0.1:%s' % cls.port), "Server is not responding!"
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.terminate()
cls.server.wait()
del cls.server
# Clear all test layers
for ln in ['test_point', 'test_polygon', 'test_linestring']:
cls._clearLayer(ln)
rmtree(cls.temp_path)
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _clearLayer(cls, layer_name):
"""
Delete all features from a vector layer
"""
layer = cls._getLayer(layer_name)
layer.startEditing()
layer.deleteFeatures([f.id() for f in layer.getFeatures()])
layer.commitChanges()
assert layer.featureCount() == 0
@classmethod
def _getLayer(cls, layer_name):
"""
OGR Layer factory
"""
path = cls.testdata_path + layer_name + '.shp'
layer = QgsVectorLayer(path, layer_name, "ogr")
assert layer.isValid()
return layer
@classmethod
def _getWFSLayer(cls, type_name, layer_name=None):
"""
WFS layer factory
"""
if layer_name is None:
layer_name = 'wfs_' + type_name
parms = {
'srsname': 'EPSG:4326',
'typename': type_name,
'url': 'http://127.0.0.1:%s/?map=%s' % (cls.port,
cls.project_path),
'version': 'auto',
'table': '',
#'sql': '',
}
uri = ' '.join([("%s='%s'" % (k, v)) for k, v in list(parms.items())])
wfs_layer = QgsVectorLayer(uri, layer_name, 'WFS')
assert wfs_layer.isValid()
return wfs_layer
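# Illustrative only (port and path are examples): for type_name 'test_point' the provider URI
# built above looks roughly like
#   srsname='EPSG:4326' typename='test_point' url='http://127.0.0.1:8081/?map=/tmp/.../wfs_transactional.qgs' version='auto' table=''
# with the key order following the parms dict above.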
@classmethod
def _getFeatureByAttribute(cls, layer, attr_name, attr_value):
"""
Find the feature and return it, raise exception if not found
"""
request = QgsFeatureRequest(QgsExpression("%s=%s" % (attr_name,
attr_value)))
try:
return next(layer.dataProvider().getFeatures(request))
except StopIteration:
raise Exception("Wrong attributes in WFS layer %s" %
layer.name())
def _checkAddFeatures(self, wfs_layer, layer, features):
"""
Check features were added
"""
wfs_layer.dataProvider().addFeatures(features)
layer = self._getLayer(layer.name())
self.assertTrue(layer.isValid())
self.assertEqual(layer.featureCount(), len(features))
self.assertEqual(wfs_layer.dataProvider().featureCount(), len(features))
def _checkUpdateFeatures(self, wfs_layer, old_features, new_features):
"""
Check features can be updated
"""
for i in range(len(old_features)):
f = self._getFeatureByAttribute(wfs_layer, 'id', old_features[i]['id'])
self.assertTrue(wfs_layer.dataProvider().changeGeometryValues({f.id(): new_features[i].geometry()}))
self.assertTrue(wfs_layer.dataProvider().changeAttributeValues({f.id(): {0: new_features[i]['id']}}))
self.assertTrue(wfs_layer.dataProvider().changeAttributeValues({f.id(): {1: new_features[i]['name']}}))
def _checkMatchFeatures(self, wfs_layer, features):
"""
Check feature attributes and geometry match
"""
for f in features:
wf = self._getFeatureByAttribute(wfs_layer, 'id', f['id'])
self.assertEqual(wf.geometry().exportToWkt(),
f.geometry().exportToWkt())
self.assertEqual(f['name'], wf['name'])
def _checkDeleteFeatures(self, layer, features):
"""
Delete features
"""
ids = []
for f in features:
wf = self._getFeatureByAttribute(layer, 'id', f['id'])
ids.append(wf.id())
self.assertTrue(layer.dataProvider().deleteFeatures(ids))
def _testLayer(self, wfs_layer, layer, old_features, new_features):
"""
Perform all test steps on the layer.
"""
self.assertEqual(wfs_layer.featureCount(), 0)
self._checkAddFeatures(wfs_layer, layer, old_features)
self._checkMatchFeatures(wfs_layer, old_features)
self.assertEqual(wfs_layer.dataProvider().featureCount(),
len(old_features))
self._checkUpdateFeatures(wfs_layer, old_features, new_features)
self._checkMatchFeatures(wfs_layer, new_features)
self._checkDeleteFeatures(wfs_layer, new_features)
self.assertEqual(wfs_layer.dataProvider().featureCount(), 0)
def testWFSPoints(self):
"""
Adds some points, then check and clear all
"""
layer_name = 'test_point'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.pendingFields())
feat1['id'] = 11
feat1.setGeometry(QgsGeometry.fromPoint(QgsPoint(9, 45)))
feat2 = QgsFeature(wfs_layer.pendingFields())
feat2.setGeometry(QgsGeometry.fromPoint(QgsPoint(9.5, 45.5)))
feat2['id'] = 12
old_features = [feat1, feat2]
# Change feat1
new_feat1 = QgsFeature(wfs_layer.pendingFields())
new_feat1['id'] = 121
new_feat1.setGeometry(QgsGeometry.fromPoint(QgsPoint(10, 46)))
new_features = [new_feat1, feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
def testWFSPointsMultipleEdits(self):
"""
Adds some points, then check.
Modify 2 points, then checks and clear all
"""
layer_name = 'test_point'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.pendingFields())
feat1['id'] = 11
feat1['name'] = 'name 11'
feat1.setGeometry(QgsGeometry.fromPoint(QgsPoint(9, 45)))
feat2 = QgsFeature(wfs_layer.pendingFields())
feat2.setGeometry(QgsGeometry.fromPoint(QgsPoint(9.5, 45.5)))
feat2['id'] = 12
feat2['name'] = 'name 12'
old_features = [feat1, feat2]
# Change feat1 and feat2
new_feat1 = QgsFeature(wfs_layer.pendingFields())
new_feat1['id'] = 121
new_feat1['name'] = 'name 121'
new_feat1.setGeometry(QgsGeometry.fromPoint(QgsPoint(10, 46)))
new_feat2 = QgsFeature(wfs_layer.pendingFields())
new_feat2['id'] = 122
new_feat2['name'] = 'name 122'
new_feat2.setGeometry(QgsGeometry.fromPoint(QgsPoint(10.5, 47)))
new_features = [new_feat1, new_feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
def testWFSPolygons(self):
"""
Adds some polygons, then check and clear all
"""
layer_name = 'test_polygon'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.pendingFields())
feat1['id'] = 11
feat1['name'] = 'name 11'
feat1.setGeometry(QgsGeometry.fromRect(QgsRectangle(QgsPoint(9, 45), QgsPoint(10, 46))))
feat2 = QgsFeature(wfs_layer.pendingFields())
feat2.setGeometry(QgsGeometry.fromRect(QgsRectangle(QgsPoint(9.5, 45.5), QgsPoint(10.5, 46.5))))
feat2['id'] = 12
feat2['name'] = 'name 12'
old_features = [feat1, feat2]
# Change feat1
new_feat1 = QgsFeature(wfs_layer.pendingFields())
new_feat1['id'] = 121
new_feat1['name'] = 'name 121'
new_feat1.setGeometry(QgsGeometry.fromRect(QgsRectangle(QgsPoint(10, 46), QgsPoint(11.5, 47.5))))
new_features = [new_feat1, feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
def testWFSLineStrings(self):
"""
Adds some lines, then check and clear all
"""
layer_name = 'test_linestring'
layer = self._getLayer(layer_name)
wfs_layer = self._getWFSLayer(layer_name)
feat1 = QgsFeature(wfs_layer.pendingFields())
feat1['id'] = 11
feat1['name'] = 'name 11'
feat1.setGeometry(QgsGeometry.fromPolyline([QgsPoint(9, 45), QgsPoint(10, 46)]))
feat2 = QgsFeature(wfs_layer.pendingFields())
feat2.setGeometry(QgsGeometry.fromPolyline([QgsPoint(9.5, 45.5), QgsPoint(10.5, 46.5)]))
feat2['id'] = 12
feat2['name'] = 'name 12'
old_features = [feat1, feat2]
# Change feat1
new_feat1 = QgsFeature(wfs_layer.pendingFields())
new_feat1['id'] = 121
new_feat1['name'] = 'name 121'
new_feat1.setGeometry(QgsGeometry.fromPolyline([QgsPoint(9.8, 45.8), QgsPoint(10.8, 46.8)]))
new_features = [new_feat1, feat2]
self._testLayer(wfs_layer, layer, old_features, new_features)
if __name__ == '__main__':
unittest.main()
| gioman/QGIS | tests/src/python/test_qgsserver_wfst.py | Python | gpl-2.0 | 12,220 | 0.001227 |
import cProfile
from pathlib import Path
def main(args, results_dir: Path, scenario_dir: Path):
try:
scenario_dir.mkdir(parents=True)
except FileExistsError:
pass
cProfile.runctx(
'from dmprsim.scenarios.python_profile import main;'
'main(args, results_dir, scenario_dir)',
globals=globals(),
locals=locals(),
filename=str(results_dir / 'profile.pstats'),
)
| reisub-de/dmpr-simulator | dmprsim/analyze/profile_core.py | Python | mit | 432 | 0 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the Entry() global function and environment method work
correctly, and that the former does not try to expand construction
variables.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print Entry('ddd')
print Entry('$FOO')
print Entry('${BAR}_$BAR')
print env.Entry('eee')
print env.Entry('$FOO')
print env.Entry('${BAR}_$BAR')
""")
test.run(stdout = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
""", build_str = """\
scons: `.' is up to date.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Distrotech/scons | test/Entry.py | Python | mit | 1,860 | 0.003226 |
from datetime import timedelta
import operator
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas.compat as compat
from pandas.compat import get_range_parameters, lrange, range
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.common import (
is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCSeries, ABCTimedeltaIndex)
from pandas.core import ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index
copy : bool, default False
Unused, accepted for homogeneity with other index types.
Attributes
----------
None
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = 'rangeindex'
_engine_type = libindex.Int64Engine
# --------------------------------------------------------------------
# Constructors
def __new__(cls, start=None, stop=None, step=None,
dtype=None, copy=False, name=None, fastpath=None):
if fastpath is not None:
warnings.warn("The 'fastpath' keyword is deprecated, and will be "
"removed in a future version.",
FutureWarning, stacklevel=2)
if fastpath:
return cls._simple_new(start, stop, step, name=name)
cls._validate_dtype(dtype)
# RangeIndex
if isinstance(start, RangeIndex):
if name is None:
name = start.name
return cls._simple_new(name=name,
**dict(start._get_data_as_items()))
# validate the arguments
def ensure_int(value, field):
msg = ("RangeIndex(...) must be called with integers,"
" {value} was passed for {field}")
if not is_scalar(value):
raise TypeError(msg.format(value=type(value).__name__,
field=field))
try:
new_value = int(value)
assert(new_value == value)
except (TypeError, ValueError, AssertionError):
raise TypeError(msg.format(value=type(value).__name__,
field=field))
return new_value
if com._all_none(start, stop, step):
msg = "RangeIndex(...) must be called with integers"
raise TypeError(msg)
elif start is None:
start = 0
else:
start = ensure_int(start, 'start')
if stop is None:
stop = start
start = 0
else:
stop = ensure_int(stop, 'stop')
if step is None:
step = 1
elif step == 0:
raise ValueError("Step must not be zero")
else:
step = ensure_int(step, 'step')
return cls._simple_new(start, stop, step, name)
@classmethod
def from_range(cls, data, name=None, dtype=None, **kwargs):
""" Create RangeIndex from a range (py3), or xrange (py2) object. """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
'range, {1} was passed'.format(cls.__name__, repr(data)))
start, stop, step = get_range_parameters(data)
return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
@classmethod
def _simple_new(cls, start, stop=None, step=None, name=None,
dtype=None, **kwargs):
result = object.__new__(cls)
# handle passed None, non-integers
if start is None and stop is None:
# empty
start, stop, step = 0, 0, 1
if start is None or not is_integer(start):
try:
return RangeIndex(start, stop, step, name=name, **kwargs)
except TypeError:
return Index(start, stop, step, name=name, **kwargs)
result._start = start
result._stop = stop or 0
result._step = step or 1
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
# --------------------------------------------------------------------
@staticmethod
def _validate_dtype(dtype):
""" require dtype to be None or int64 """
if not (dtype is None or is_int64_dtype(dtype)):
raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self):
return np.arange(self._start, self._stop, self._step, dtype=np.int64)
@cache_readonly
def _int64index(self):
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
return [('start', self._start),
('stop', self._stop),
('step', self._step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (self.__class__, d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
# --------------------------------------------------------------------
@cache_readonly
def nbytes(self):
"""
Return the number of bytes in the underlying data
On implementations where this is undetermined (PyPy)
assume 24 bytes for each value
"""
return sum(getsizeof(getattr(self, v), 24) for v in
['_start', '_stop', '_step'])
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self):
return np.dtype(np.int64)
@property
def is_unique(self):
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self):
return self._step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self):
return self._step < 0 or len(self) <= 1
@property
def has_duplicates(self):
return False
def tolist(self):
return lrange(self._start, self._stop, self._step)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
name = kwargs.get("name", self.name)
return RangeIndex._simple_new(
name=name, **dict(self._get_data_as_items()))
else:
kwargs.setdefault('name', self.name)
return self._int64index._shallow_copy(values, **kwargs)
@Appender(ibase._index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
self._validate_dtype(dtype)
if name is None:
name = self.name
return RangeIndex._simple_new(
name=name, **dict(self._get_data_as_items()))
def _minmax(self, meth):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif ((meth == 'min' and self._step > 0) or
(meth == 'max' and self._step < 0)):
return self._start
return self._start + self._step * no_steps
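# Example for _minmax above: RangeIndex(0, 10, 3) holds 0, 3, 6, 9, so no_steps is 3,
# min() returns self._start == 0 and max() returns 0 + 3 * 3 == 9.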
def min(self, axis=None, skipna=True):
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('min')
def max(self, axis=None, skipna=True):
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('max')
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
ls = len(self)
lo = len(other)
return (ls == lo == 0 or
ls == lo == 1 and
self._start == other._start or
ls == lo and
self._start == other._start and
self._step == other._step)
return super(RangeIndex, self).equals(other)
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index
"""
self._validate_sort_keyword(sort)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, RangeIndex):
return super(RangeIndex, self).intersection(other, sort=sort)
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
first = self[::-1] if self._step < 0 else self
second = other[::-1] if other._step < 0 else other
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first._start, second._start)
int_high = min(first._stop, second._stop)
if int_high <= int_low:
return RangeIndex._simple_new(None)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = first._extended_gcd(first._step, second._step)
# check whether element sets intersect
if (first._start - second._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first._start + (second._start - first._start) * \
first._step // gcd * s
new_step = first._step * second._step // gcd
new_index = RangeIndex._simple_new(tmp_start, int_high, new_step)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit):
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self._start) // abs(self._step))
return self._start + abs(self._step) * no_steps
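# Worked example: with _start == 0 and _step == 3, lower_limit == 7 gives
# no_steps == -(-7 // 3) == 3 (ceiling division), so 9 is returned, the smallest
# element of 0, 3, 6, 9, ... that is >= 7.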
def _max_fitting_element(self, upper_limit):
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self._start) // abs(self._step)
return self._start + abs(self._step) * no_steps
def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
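# Worked example: _extended_gcd(6, 4) returns (2, 1, -1), since gcd(6, 4) == 2 and
# 6 * 1 + 4 * (-1) == 2 is one particular solution of Bezout's identity.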
def union(self, other):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other) or len(self) == 0:
return super(RangeIndex, self).union(other)
if isinstance(other, RangeIndex):
start_s, step_s = self._start, self._step
end_s = self._start + self._step * (len(self) - 1)
start_o, step_o = other._start, other._step
end_o = other._start + other._step * (len(other) - 1)
if self._step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other._step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self._start - other._start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if ((start_s - start_o) % step_s == 0 and
(start_s - end_o) <= step_s and
(start_o - end_s) <= step_s):
return RangeIndex(start_r, end_r + step_s, step_s)
if ((step_s % 2 == 0) and
(abs(start_s - start_o) <= step_s / 2) and
(abs(end_s - end_o) <= step_s / 2)):
return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if ((start_o - start_s) % step_s == 0 and
(start_o + step_s >= start_s) and
(end_o - step_s <= end_s)):
return RangeIndex(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if ((start_s - start_o) % step_o == 0 and
(start_s + step_o >= start_o) and
(end_s - step_o <= end_o)):
return RangeIndex(start_r, end_r + step_o, step_o)
return self._int64index.union(other)
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
if how == 'outer' and self is not other:
# note: could return RangeIndex in more circumstances
return self._int64index.join(other, how, level, return_indexers,
sort)
return super(RangeIndex, self).join(other, how, level, return_indexers,
sort)
def _concat_same_dtype(self, indexes, name):
return _concat._concat_rangeindex_same_dtype(indexes).rename(name)
def __len__(self):
"""
return the length of the RangeIndex
"""
return max(0, -(-(self._stop - self._start) // self._step))
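# The double negation is ceiling division: e.g. start=0, stop=5, step=2 gives
# -(-5 // 2) == 3, matching the three represented values 0, 2, 4.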
@property
def size(self):
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
super_getitem = super(RangeIndex, self).__getitem__
if is_scalar(key):
if not lib.is_integer(key):
raise IndexError("only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices")
n = com.cast_scalar_indexer(key)
if n != key:
return super_getitem(key)
if n < 0:
n = len(self) + key
if n < 0 or n > len(self) - 1:
raise IndexError("index {key} is out of bounds for axis 0 "
"with size {size}".format(key=key,
size=len(self)))
return self._start + n * self._step
if isinstance(key, slice):
# This is basically PySlice_GetIndicesEx, but delegation to our
# super routines if we don't have integers
length = len(self)
# complete missing slice information
step = 1 if key.step is None else key.step
if key.start is None:
start = length - 1 if step < 0 else 0
else:
start = key.start
if start < 0:
start += length
if start < 0:
start = -1 if step < 0 else 0
if start >= length:
start = length - 1 if step < 0 else length
if key.stop is None:
stop = -1 if step < 0 else length
else:
stop = key.stop
if stop < 0:
stop += length
if stop < 0:
stop = -1
if stop > length:
stop = length
# delegate non-integer slices
if (start != int(start) or
stop != int(stop) or
step != int(step)):
return super_getitem(key)
# convert indexes to values
start = self._start + self._step * start
stop = self._start + self._step * stop
step = self._step * step
return RangeIndex._simple_new(start, stop, step, name=self.name)
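# For example, with the slice branch above RangeIndex(0, 10, 2)[1:4] resolves to
# _simple_new(2, 8, 2), i.e. the values 2, 4, 6.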
# fall back to Int64Index
return super_getitem(key)
def __floordiv__(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
if is_integer(other) and other != 0:
if (len(self) == 0 or
self._start % other == 0 and
self._step % other == 0):
start = self._start // other
step = self._step // other
stop = start + len(self) * step
return RangeIndex._simple_new(
start, stop, step, name=self.name)
if len(self) == 1:
start = self._start // other
return RangeIndex._simple_new(
start, start + 1, 1, name=self.name)
return self._int64index // other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, step=False):
"""
Parameters
----------
op : callable that accepts 2 parms
perform the binary op
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
def _evaluate_numeric_binop(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
other = self._validate_for_numeric_binop(other, op)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all='ignore'):
rstep = step(left._step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left._step
with np.errstate(all='ignore'):
rstart = op(left._start, right)
rstop = op(left._stop, right)
result = RangeIndex(rstart,
rstop,
rstep,
**attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in
[rstart, rstop, rstep]):
result = result.astype('float64')
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(_evaluate_numeric_binop, name, cls)
cls.__add__ = _make_evaluate_binop(operator.add)
cls.__radd__ = _make_evaluate_binop(ops.radd)
cls.__sub__ = _make_evaluate_binop(operator.sub)
cls.__rsub__ = _make_evaluate_binop(ops.rsub)
cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
cls.__truediv__ = _make_evaluate_binop(operator.truediv,
step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv,
step=ops.rtruediv)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(operator.div, step=operator.div)
cls.__rdiv__ = _make_evaluate_binop(ops.rdiv, step=ops.rdiv)
RangeIndex._add_numeric_methods()
RangeIndex._add_logical_methods()
| GuessWhoSamFoo/pandas | pandas/core/indexes/range.py | Python | bsd-3-clause | 24,595 | 0 |
#!/usr/bin/env python
import sys
for _ in range(101):
print "P\n."
sys.stdout.flush()
| ethercrow/ai-challenger | game-rps/paper.py | Python | mit | 95 | 0.010526 |
#!/usr/bin/env python
"""xplot.py:
This program uses matplotlib to plot xplot-like data.
Last modified: Thu Jul 23, 2015 04:54PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@iitb.ac.in"
__status__ = "Development"
import sys
import pylab
data = {}
def buildData( file ):
global data
with open(file, "r") as f:
xvec = []
yvec = []
for line in f:
if line[0] == ';' or line[0] == '#':
continue
line = line.strip()
if "," in line:
line = line.split(",")
else:
line = line.split()
try:
xvec.append(float(line[0]))
yvec.append(line[1:])
except:
pass
assert len(xvec) == len(yvec)
data[file] = (xvec, yvec)
def zipIt(ys):
""" Zip an n-dims vector.
There are as many sublists as there are elements in each element of list.
"""
result = [[ ] for x in ys[0] ]
for y in ys:
for i, e in enumerate(y):
result[i].append(e)
return result
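# Example: zipIt([[1, 2], [3, 4], [5, 6]]) returns [[1, 3, 5], [2, 4, 6]],
# one sublist per column of the y-data read from the input file.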
def plotData( args ):
outFile = args.output
global data
for file in data:
xvec, yx = data[file]
try:
yvecs = zipIt(yx)
except Exception as e:
print("[FATAL] Failed to zip the given elements")
sys.exit(0)
for yvec in yvecs:
pylab.plot(xvec, yvec)
if args.title:
pylab.title(str(args.title))
if not outFile:
pylab.show()
else:
print("[INFO] Saving plots to: {}".format( outFile ))
pylab.savefig(outFile)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--file"
, nargs = "+"
, help = "xplot file to plot using matplotlib"
)
parser.add_argument("-o", "--output"
, default = None
, help = "Output file to store plot"
)
parser.add_argument("-t", "--title"
, default = ""
, help = "Title of the plot"
)
args = parser.parse_args()
[ buildData(file) for file in args.file ]
plotData( args )
| BhallaLab/benchmarks | moose_nrn_equivalence_testing/comparision_with_simple_HH_model/xplot.py | Python | gpl-2.0 | 2,472 | 0.023463 |
from setuptools import setup
import minify.command
setup(name='cloaca',
version='0.1.0',
url='https://github.com/mhmurray/cloaca',
author='Michael Murray',
author_email='michaelhamburgmurray@gmail.com',
license='MIT',
packages=['cloaca'],
zip_safe=False,
include_package_data=True,
scripts=[
'cloaca/cloacaapp.py'
],
install_requires=[
'tornado>=4.3.0',
'tornadis>=0.7.0',
'bcrypt>=2.0.0',
'futures>=3.0.5',
'minify',
],
cmdclass={
'minify_css' : minify.command.minify_css,
},
)
| mhmurray/cloaca | setup.py | Python | mit | 696 | 0.018678 |
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron._i18n import _
from neutron.common import utils
SHARED_OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket.')),
cfg.StrOpt('metadata_proxy_user',
default='',
help=_("User (uid or name) running metadata proxy after "
"its initialization (if empty: agent effective "
"user).")),
cfg.StrOpt('metadata_proxy_group',
default='',
help=_("Group (gid or name) running metadata proxy after "
"its initialization (if empty: agent effective "
"group)."))
]
DRIVER_OPTS = [
cfg.BoolOpt('metadata_proxy_watch_log',
help=_("Enable/Disable log watch by metadata proxy. It "
"should be disabled when metadata_proxy_user/group "
"is not allowed to read/write its log file and "
"copytruncate logrotate option must be used if "
"logrotate is enabled on metadata proxy log "
"files. Option default value is deduced from "
"metadata_proxy_user: watch log is enabled if "
"metadata_proxy_user is agent effective user "
"id/name.")),
]
METADATA_PROXY_HANDLER_OPTS = [
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file for ssl")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server.")),
cfg.PortOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('When proxying metadata requests, Neutron signs the '
'Instance-ID header with a shared secret to prevent '
'spoofing. You may select any string for a secret, '
'but it must match here and in the configuration used '
'by the Nova Metadata Server. NOTE: Nova uses the same '
'config key, but in [neutron] section.'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
DEDUCE_MODE = 'deduce'
USER_MODE = 'user'
GROUP_MODE = 'group'
ALL_MODE = 'all'
SOCKET_MODES = (DEDUCE_MODE, USER_MODE, GROUP_MODE, ALL_MODE)
UNIX_DOMAIN_METADATA_PROXY_OPTS = [
cfg.StrOpt('metadata_proxy_socket_mode',
default=DEDUCE_MODE,
choices=SOCKET_MODES,
help=_("Metadata Proxy UNIX domain socket mode, 4 values "
"allowed: "
"'deduce': deduce mode from metadata_proxy_user/group "
"values, "
"'user': set metadata proxy socket mode to 0o644, to "
"use when metadata_proxy_user is agent effective user "
"or root, "
"'group': set metadata proxy socket mode to 0o664, to "
"use when metadata_proxy_group is agent effective "
"group or root, "
"'all': set metadata proxy socket mode to 0o666, to use "
"otherwise.")),
cfg.IntOpt('metadata_workers',
default=utils.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server (defaults to half of the number of CPUs)')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
| wolverineav/neutron | neutron/agent/metadata/config.py | Python | apache-2.0 | 5,135 | 0 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
# pylint: disable=line-too-long
r"""Run training loop.
"""
# pylint: enable=line-too-long
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.specs.tensor_spec import TensorSpec
import tqdm
from representation_batch_rl.batch_rl import asac
from representation_batch_rl.batch_rl import awr
from representation_batch_rl.batch_rl import ddpg
from representation_batch_rl.batch_rl import evaluation
from representation_batch_rl.batch_rl import pcl
from representation_batch_rl.batch_rl import sac
from representation_batch_rl.batch_rl import sac_v1
from representation_batch_rl.batch_rl.image_utils import image_aug
from representation_batch_rl.twin_sac import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'pixels-dm-cartpole-swingup',
'Environment for training/evaluation.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed for training.')
flags.DEFINE_float('actor_lr', 3e-4, 'Actor learning rate.')
flags.DEFINE_float('alpha_lr', 3e-4, 'Temperature learning rate.')
flags.DEFINE_float('critic_lr', 3e-4, 'Critic learning rate.')
flags.DEFINE_integer('deployment_batch_size', 1, 'Batch size.')
flags.DEFINE_integer('sample_batch_size', 256, 'Batch size.')
flags.DEFINE_float('discount', 0.99, 'Discount used for returns.')
flags.DEFINE_float('tau', 0.005,
'Soft update coefficient for the target network.')
flags.DEFINE_integer('max_timesteps', 200_000, 'Max timesteps to train.')
flags.DEFINE_integer('max_length_replay_buffer', 100_000,
'Max replay buffer size (image observations use 100k).')
flags.DEFINE_integer('num_random_actions', 10_000,
'Fill replay buffer with N random actions.')
flags.DEFINE_integer('start_training_timesteps', 10_000,
'Start training when replay buffer contains N timesteps.')
flags.DEFINE_string('save_dir', '/tmp/save/', 'Directory to save results to.')
flags.DEFINE_integer('log_interval', 1_000, 'Log every N timesteps.')
flags.DEFINE_integer('eval_interval', 10_000, 'Evaluate every N timesteps.')
flags.DEFINE_integer('action_repeat', 8,
'(optional) action repeat used when instantiating env.')
flags.DEFINE_integer('frame_stack', 0,
'(optional) frame stack used when instantiating env.')
flags.DEFINE_enum('algo_name', 'sac', [
'ddpg',
'crossnorm_ddpg',
'sac',
'pc_sac',
'pcl',
'crossnorm_sac',
'crr',
'awr',
'sac_v1',
'asac',
], 'Algorithm.')
flags.DEFINE_boolean('eager', False, 'Execute functions eagerly.')
def main(_):
if FLAGS.eager:
tf.config.experimental_run_functions_eagerly(FLAGS.eager)
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
action_repeat = FLAGS.action_repeat
_, _, domain_name, _ = FLAGS.env_name.split('-')
if domain_name in ['cartpole']:
FLAGS.set_default('action_repeat', 8)
elif domain_name in ['reacher', 'cheetah', 'ball_in_cup', 'hopper']:
FLAGS.set_default('action_repeat', 4)
elif domain_name in ['finger', 'walker']:
FLAGS.set_default('action_repeat', 2)
FLAGS.set_default('max_timesteps', FLAGS.max_timesteps // FLAGS.action_repeat)
env = utils.load_env(
FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
eval_env = utils.load_env(
FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
is_image_obs = (isinstance(env.observation_spec(), TensorSpec) and
len(env.observation_spec().shape) == 3)
spec = (
env.observation_spec(),
env.action_spec(),
env.reward_spec(),
env.reward_spec(), # discount spec
env.observation_spec() # next observation spec
)
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
spec, batch_size=1, max_length=FLAGS.max_length_replay_buffer)
@tf.function
def add_to_replay(state, action, reward, discount, next_states):
replay_buffer.add_batch((state, action, reward, discount, next_states))
hparam_str = utils.make_hparam_string(
FLAGS.xm_parameters, seed=FLAGS.seed, env_name=FLAGS.env_name,
algo_name=FLAGS.algo_name)
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.save_dir, 'tb', hparam_str))
results_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.save_dir, 'results', hparam_str))
if 'ddpg' in FLAGS.algo_name:
model = ddpg.DDPG(
env.observation_spec(),
env.action_spec(),
cross_norm='crossnorm' in FLAGS.algo_name)
elif 'crr' in FLAGS.algo_name:
model = awr.AWR(
env.observation_spec(),
env.action_spec(), f='bin_max')
elif 'awr' in FLAGS.algo_name:
model = awr.AWR(
env.observation_spec(),
env.action_spec(), f='exp_mean')
elif 'sac_v1' in FLAGS.algo_name:
model = sac_v1.SAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
elif 'asac' in FLAGS.algo_name:
model = asac.ASAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
elif 'sac' in FLAGS.algo_name:
model = sac.SAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0],
cross_norm='crossnorm' in FLAGS.algo_name,
pcl_actor_update='pc' in FLAGS.algo_name)
elif 'pcl' in FLAGS.algo_name:
model = pcl.PCL(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
initial_collect_policy = random_tf_policy.RandomTFPolicy(
env.time_step_spec(), env.action_spec())
dataset = replay_buffer.as_dataset(
num_parallel_calls=tf.data.AUTOTUNE,
sample_batch_size=FLAGS.sample_batch_size)
if is_image_obs:
# Augment images as in DRQ.
dataset = dataset.map(image_aug,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False).prefetch(3)
else:
dataset = dataset.prefetch(3)
def repack(*data):
return data[0]
dataset = dataset.map(repack)
replay_buffer_iter = iter(dataset)
previous_time = time.time()
timestep = env.reset()
episode_return = 0
episode_timesteps = 0
step_mult = 1 if action_repeat < 1 else action_repeat
for i in tqdm.tqdm(range(FLAGS.max_timesteps)):
if i % FLAGS.deployment_batch_size == 0:
for _ in range(FLAGS.deployment_batch_size):
if timestep.is_last():
if episode_timesteps > 0:
current_time = time.time()
with summary_writer.as_default():
tf.summary.scalar(
'train/returns',
episode_return,
step=(i + 1) * step_mult)
tf.summary.scalar(
'train/FPS',
episode_timesteps / (current_time - previous_time),
step=(i + 1) * step_mult)
timestep = env.reset()
episode_return = 0
episode_timesteps = 0
previous_time = time.time()
if (replay_buffer.num_frames() < FLAGS.num_random_actions or
replay_buffer.num_frames() < FLAGS.deployment_batch_size):
# Use policy only after the first deployment.
policy_step = initial_collect_policy.action(timestep)
action = policy_step.action
else:
action = model.actor(timestep.observation, sample=True)
next_timestep = env.step(action)
add_to_replay(timestep.observation, action, next_timestep.reward,
next_timestep.discount, next_timestep.observation)
episode_return += next_timestep.reward[0]
episode_timesteps += 1
timestep = next_timestep
if i + 1 >= FLAGS.start_training_timesteps:
with summary_writer.as_default():
info_dict = model.update_step(replay_buffer_iter)
if (i + 1) % FLAGS.log_interval == 0:
with summary_writer.as_default():
for k, v in info_dict.items():
tf.summary.scalar(f'training/{k}', v, step=(i + 1) * step_mult)
if (i + 1) % FLAGS.eval_interval == 0:
logging.info('Performing policy eval.')
average_returns, evaluation_timesteps = evaluation.evaluate(
eval_env, model)
with results_writer.as_default():
tf.summary.scalar(
'evaluation/returns', average_returns, step=(i + 1) * step_mult)
tf.summary.scalar(
'evaluation/length', evaluation_timesteps, step=(i+1) * step_mult)
logging.info('Eval at %d: ave returns=%f, ave episode length=%f',
(i + 1) * step_mult, average_returns, evaluation_timesteps)
if (i + 1) % FLAGS.eval_interval == 0:
model.save_weights(
os.path.join(FLAGS.save_dir, 'results',
FLAGS.env_name + '__' + str(i + 1)))
if __name__ == '__main__':
app.run(main)
| google-research/google-research | representation_batch_rl/batch_rl/train_eval_online.py | Python | apache-2.0 | 9,735 | 0.006471 |
#!/usr/bin/env python
def run(c, *channels):
server = c['MainWindow'].ui_plugins['ServerList'].active_server
connection = c['MainWindow'].ui_plugins['ServerList'].servers[server]['connection']
if isinstance(channels, str):
channels = [channels]
for channel in channels:
if channel.startswith('#') is False:
channel = '#' + channel
channel = channel.replace('\n', '').replace('\r', '')
connection.send('JOIN ' + channel.strip())
return None
| Sjc1000/PyRC | Commands/join.py | Python | gpl-2.0 | 505 | 0.00396 |
"""Tests for the preset and the history of queries."""
import json
import os
from qgis.core import QgsCoordinateReferenceSystem, QgsRectangle
from qgis.testing import unittest
from QuickOSM.core.utilities.json_encoder import as_enum
from QuickOSM.core.utilities.query_saved import QueryManagement
from QuickOSM.core.utilities.tools import query_preset
from QuickOSM.definitions.format import Format
from QuickOSM.definitions.gui import Panels
from QuickOSM.ui.dialog import Dialog
from QuickOSM.ui.edit_preset import EditPreset
__copyright__ = 'Copyright 2021, 3Liz'
__license__ = 'GPL version 3'
__email__ = 'info@3liz.org'
class TestBookmarkQuery(unittest.TestCase):
"""Tests for the preset and the history of queries."""
def setUp(self):
"""Set up the tests"""
self.maxDiff = None
self.preset_folder = query_preset()
self.dialog = Dialog()
index = self.dialog.table_keys_values_qq.cellWidget(0, 1).findText('amenity')
self.dialog.table_keys_values_qq.cellWidget(0, 1).setCurrentIndex(index)
index = self.dialog.table_keys_values_qq.cellWidget(0, 2).findText('bench')
self.dialog.table_keys_values_qq.cellWidget(0, 2).setCurrentIndex(index)
self.dialog.places_edits[Panels.QuickQuery].setText('foo')
self.dialog.button_save_query.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
def set_up_preset_data_text(self) -> dict:
"""Load the data save in the json file linked to the preset."""
preset_file = os.path.join(
self.preset_folder, self.name_preset, self.name_preset + '.json')
with open(preset_file, encoding='utf8') as json_file:
data_preset = json.load(json_file)
return data_preset
def set_up_preset_data(self) -> dict:
"""Load the data save in the json file linked to the preset."""
preset_folder = query_preset()
preset_file = os.path.join(
preset_folder, self.name_preset, self.name_preset + '.json')
with open(preset_file, encoding='utf8') as json_file:
data_preset = json.load(json_file, object_hook=as_enum)
return data_preset
def tearDown(self):
"""End of the tests"""
self.dialog.external_panels[Panels.MapPreset].remove_preset(self.preset, self.name_preset)
def test_save_in_preset(self):
"""Test if the file is save in preset."""
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
self.assertEqual(self.name_preset, 'amenity_bench_foo')
def test_preset_format(self):
"""Test if the file in preset is as expected."""
data_preset = self.set_up_preset_data_text()
expected_json = {
"query":
[
"[out:xml] [timeout:25];\n {{geocodeArea:foo}} -> .area_0;\n(\n"
" node[\"amenity\"=\"bench\"](area.area_0);\n "
"way[\"amenity\"=\"bench\"](area.area_0);\n "
"relation[\"amenity\"=\"bench\"](area.area_0);\n);\n"
"(._;>;);\nout body;"
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["amenity_bench_foo"],
"query_name": ["Query1"],
"type_multi_request": [[]],
"keys": [["amenity"]],
"values": [["bench"]],
"area": ["foo"],
"bbox": [""],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [{"__enum__": "Format.GeoPackage"}]
}
self.assertDictEqual(expected_json, data_preset)
def test_view_bookmark(self):
"""Test if we can display a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
self.assertEqual(data_preset['file_name'], edit_dialog.preset_name.text())
self.assertEqual(
data_preset['description'], edit_dialog.description.toPlainText().split('\\n')
)
self.assertEqual(data_preset['query_layer_name'][0], edit_dialog.layer_name.text())
self.assertEqual(data_preset['query'][0], edit_dialog.query.toPlainText())
self.assertEqual(data_preset['area'][0], edit_dialog.area.text())
self.assertFalse(edit_dialog.bbox.outputExtent().xMinimum())
self.assertFalse(edit_dialog.bbox.outputExtent().yMinimum())
self.assertTrue(edit_dialog.checkbox_points.isChecked())
self.assertTrue(edit_dialog.checkbox_lines.isChecked())
self.assertTrue(edit_dialog.checkbox_multilinestrings.isChecked())
self.assertTrue(edit_dialog.checkbox_multipolygons.isChecked())
self.assertFalse(edit_dialog.white_points.text())
self.assertFalse(edit_dialog.white_lines.text())
self.assertFalse(edit_dialog.white_multilinestrings.text())
self.assertFalse(edit_dialog.white_multipolygons.text())
self.assertEqual(edit_dialog.combo_output_format.currentData(), Format.GeoPackage)
self.assertEqual(
data_preset['output_directory'][0], edit_dialog.output_directory.filePath()
)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
edit_dialog.preset_name.setText('Test a new name')
edit_dialog.button_cancel.click()
self.dialog.external_panels[Panels.MapPreset].update_personal_preset_view()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
self.assertNotEqual(self.name_preset, 'Test_a_new_name')
def test_edit_rename_bookmark(self):
"""Test if we can edit and rename a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
edit_dialog.preset_name.setText('Test a new name')
edit_dialog.button_validate.click()
self.dialog.external_panels[Panels.MapPreset].update_personal_preset_view()
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
self.assertEqual(self.name_preset, 'Test_a_new_name')
def test_edited_bookmark_file(self):
"""Test if we can edit a preset and check the edited json file."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
edit_dialog.description.setPlainText('Be or not to be...\\nShakespear')
edit_dialog.layer_name.setText('Misery')
edit_dialog.query.setPlainText('I would like two pencils please.')
edit_dialog.checkbox_points.setChecked(True)
edit_dialog.checkbox_lines.setChecked(True)
edit_dialog.checkbox_multilinestrings.setChecked(False)
edit_dialog.checkbox_multipolygons.setChecked(False)
edit_dialog.white_points.setText('name')
index = edit_dialog.combo_output_format.findData(Format.Kml)
edit_dialog.combo_output_format.setCurrentIndex(index)
edit_dialog.button_validate.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
new_data = self.set_up_preset_data_text()
expected_json = {
"query":
[
"I would like two pencils please."
],
"description":
["Be or not to be...", "Shakespear"],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["Misery"],
"query_name": ["Query1"],
"type_multi_request": [[]],
"keys": [["amenity"]],
"values": [["bench"]],
"area": ["foo"],
"bbox": [{'__extent__': '0.0 0.0 0.0 0.0'}],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": 'name', "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [{"__enum__": "Format.Kml"}]
}
self.assertDictEqual(expected_json, new_data)
def test_advanced_view(self):
"""Test if the view match the preset type."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
current = edit_dialog.stacked_parameters_preset.currentWidget()
self.assertEqual(current, edit_dialog.basic_parameters)
edit_dialog.radio_advanced.setChecked(True)
current = edit_dialog.stacked_parameters_preset.currentWidget()
self.assertEqual(current, edit_dialog.advanced_parameters)
def test_bookmark_several_query(self):
"""Test if we can manage (add and remove) several queries in a preset."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
self.assertEqual(edit_dialog.current_query, 0)
edit_dialog.button_add.click()
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 2)
self.assertEqual(edit_dialog.current_query, 1)
self.assertEqual(edit_dialog.layer_name.text(), '')
edit_dialog.layer_name.setText('Query2')
index = edit_dialog.table_keys_values_eb.cellWidget(0, 1).findText('type')
edit_dialog.table_keys_values_eb.cellWidget(0, 1).setCurrentIndex(index)
edit_dialog.table_keys_values_eb.cellWidget(0, 3).click()
index = edit_dialog.table_keys_values_eb.cellWidget(1, 1).findText('route')
edit_dialog.table_keys_values_eb.cellWidget(1, 1).setCurrentIndex(index)
edit_dialog.key_edited(1)
index = edit_dialog.table_keys_values_eb.cellWidget(1, 2).findText('bicycle')
edit_dialog.table_keys_values_eb.cellWidget(1, 2).setCurrentIndex(index)
index = edit_dialog.table_keys_values_eb.cellWidget(0, 2).findText('route')
edit_dialog.table_keys_values_eb.cellWidget(0, 2).setCurrentIndex(index)
edit_dialog.button_validate.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
new_data = self.set_up_preset_data_text()
expected_json = {
"query":
[
"[out:xml] [timeout:25];\n {{geocodeArea:foo}} -> .area_0;\n(\n"
" node[\"amenity\"=\"bench\"](area.area_0);\n "
"way[\"amenity\"=\"bench\"](area.area_0);\n "
"relation[\"amenity\"=\"bench\"](area.area_0);\n);\n"
"(._;>;);\nout body;",
""
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": False,
"file_name": "amenity_bench_foo",
"query_layer_name": ["amenity_bench_foo", "Query2"],
"query_name": ["Query1", "Query2"],
"type_multi_request": [[], [{"__enum__": "MultiType.AND"}]],
"keys": [["amenity"], ["type", "route"]],
"values": [["bench"], ["route", "bicycle"]],
"area": ["foo", ""],
"bbox": [{'__extent__': '0.0 0.0 0.0 0.0'}, {'__extent__': '0.0 0.0 0.0 0.0'}],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
], [
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[
{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None},
{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}
],
"output_directory": ["", ""],
"output_format": [{"__enum__": "Format.GeoPackage"}, None]
}
self.assertDictEqual(expected_json, new_data)
edit_dialog.list_queries.setCurrentRow(0)
self.assertEqual(edit_dialog.current_query, 0)
self.assertEqual(edit_dialog.layer_name.text(), 'amenity_bench_foo')
edit_dialog.delete_query(0)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
self.assertEqual(edit_dialog.layer_name.text(), 'Query2')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
x_min = 2.71828
x_max = 3.1415926
y_min = 0.0
y_max = 1.6180339
rect = QgsRectangle(x_min, y_min, x_max, y_max)
edit_dialog.bbox.setOutputExtentFromUser(rect, crs)
self.assertEqual(
edit_dialog.stacked_parameters_preset.currentWidget(), edit_dialog.basic_parameters)
edit_dialog.radio_advanced.setChecked(True)
self.assertEqual(
edit_dialog.stacked_parameters_preset.currentWidget(), edit_dialog.advanced_parameters)
edit_dialog.button_validate.click()
self.preset = self.dialog.list_personal_preset_mp.item(0)
new_data = self.set_up_preset_data_text()
expected_json = {
"query":
[
""
],
"description":
["All OSM objects with the key 'amenity'='bench' in foo are going to be downloaded."],
"advanced": True,
"file_name": "amenity_bench_foo",
"query_layer_name": ["Query2"],
"query_name": ["Query1"],
"type_multi_request": [[{"__enum__": "MultiType.AND"}]],
"keys": [["type", "route"]],
"values": [["route", "bicycle"]],
"area": [""],
"bbox": [{'__extent__': '2.71828 0.0 3.1415926 1.6180339'}],
"output_geom_type":
[
[
{"__enum__": "LayerType.Points"},
{"__enum__": "LayerType.Lines"},
{"__enum__": "LayerType.Multilinestrings"},
{"__enum__": "LayerType.Multipolygons"}
]
],
"white_list_column":
[{"multilinestrings": None, "points": None, "lines": None, "multipolygons": None}],
"output_directory": [""],
"output_format": [None]
}
self.assertDictEqual(expected_json, new_data)
def test_add_in_preset(self):
"""Test if we can add a query in a preset from the Quick Query panel."""
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 1)
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
q_manage = QueryManagement(
query='',
name='aeroway_control_tower_foo',
description='',
advanced=False,
keys='aeroway',
values='control_tower',
area='foo'
)
q_manage.add_query_in_preset('amenity_bench_foo')
self.preset = self.dialog.list_personal_preset_mp.item(0)
layout_label = self.dialog.list_personal_preset_mp.itemWidget(self.preset).layout()
self.name_preset = layout_label.itemAt(0).itemAt(0).widget().text()
data_preset = self.set_up_preset_data()
edit_dialog = EditPreset(self.dialog, data_preset)
nb_queries = edit_dialog.list_queries.count()
self.assertEqual(nb_queries, 2)
nb_preset = self.dialog.list_personal_preset_mp.count()
self.assertEqual(nb_preset, 1)
|
Gustry/QuickOSM
|
QuickOSM/test/test_saved_query.py
|
Python
|
gpl-2.0
| 17,258 | 0.002318 |
from JumpScale import j
# import re
import os
# import jinja2
from watchdog.events import FileSystemEventHandler
# The default Observer on Linux (InotifyObserver) hangs in the call to `observer.schedule` because the observer uses `threading.Lock`, which is
# monkeypatched by `gevent`. To work around this, I use `PollingObserver`. It's more CPU consuming than `InotifyObserver`, but still better than
# reloading the doc processor.
#
#from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver as Observer
class DocHandler(FileSystemEventHandler):
def __init__(self, doc_processor):
self.doc_processor = doc_processor
def on_created(self, event):
print(('Document {} added'.format(event.src_path)))
path = os.path.dirname(event.src_path)
pathItem = event.src_path
docs = []
if pathItem:
lastDefaultPath = ""
if pathItem.endswith('.wiki'):
lastDefaultPath = os.path.join(self.doc_processor.space_path, '.space', 'default.wiki')
elif pathItem.endswith('.md'):
lastDefaultPath = os.path.join(self.doc_processor.space_path, '.space', 'default.md')
elif pathItem.endswith('.py'):
self.reloadMacro(event)
self.doc_processor.add_doc(pathItem, path, docs=docs, lastDefaultPath=lastDefaultPath)
self.doc_processor.docs[-1].loadFromDisk()
self.doc_processor.docs[-1].preprocess()
def on_modified(self, event):
if event.src_path and not event.is_directory and event.src_path.endswith(".py"):
self.reloadMacro(event)
def reloadMacro(self, event):
for macroexecute in (self.doc_processor.macroexecutorPreprocessor,
self.doc_processor.macroexecutorWiki, self.doc_processor.macroexecutorPage):
for groupname, taskletenginegroup in list(macroexecute.taskletsgroup.items()):
for group, taskletengine in list(taskletenginegroup.taskletEngines.items()):
for tasklet in taskletengine.tasklets:
if tasklet.path == event.src_path:
taskletengine.reloadTasklet(tasklet)
return
on_moved = on_created
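# Minimal usage sketch (hedged: the real wiring lives elsewhere in the portal
# code; `doc_processor` and `space_path` are hypothetical stand-ins). It relies
# only on the standard watchdog Observer API, which is why PollingObserver is
# imported above:
#
#     observer = Observer()
#     observer.schedule(DocHandler(doc_processor), space_path, recursive=True)
#     observer.start()
#     # ... run the portal ...
#     observer.stop()
#     observer.join()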
|
Jumpscale/jumpscale_portal8
|
lib/portal/docpreprocessor/DocHandler.py
|
Python
|
apache-2.0
| 2,305 | 0.004338 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from mantid.api import (DataProcessorAlgorithm, mtd, AlgorithmFactory,
FileProperty, FileAction,
MultipleFileProperty, WorkspaceProperty,
PropertyMode, Progress,
MatrixWorkspaceProperty,
ITableWorkspaceProperty)
from mantid.simpleapi import (LoadIsawUB, MaskDetectors, ConvertUnits,
CropWorkspace, LoadInstrument,
SetGoniometer, ConvertToMD, MDNorm,
MinusMD, Load, DeleteWorkspace,
RenameWorkspaces,
CreateSingleValuedWorkspace, LoadNexus,
MultiplyMD, LoadIsawDetCal, LoadMask,
CopyInstrumentParameters,
ApplyCalibration, CopySample,
RecalculateTrajectoriesExtents,
CropWorkspaceForMDNorm)
from mantid.kernel import VisibleWhenProperty, PropertyCriterion, FloatArrayLengthValidator, FloatArrayProperty, Direction, Property
from mantid import logger
import numpy as np
class SingleCrystalDiffuseReduction(DataProcessorAlgorithm):
temp_workspace_list = ['__run', '__md', '__data', '__norm',
'__bkg', '__bkg_md', '__bkg_data', '__bkg_norm',
'__normalizedData', '__normalizedBackground',
'PreprocessedDetectorsWS']
def category(self):
return "Diffraction\\Reduction"
def seeAlso(self):
return [ "ConvertToMD","MDNormSCDPreprocessIncoherent","MDNorm" ]
def name(self):
return "SingleCrystalDiffuseReduction"
def summary(self):
return "Single Crystal Diffuse Scattering Reduction, normalisation, symmetry and background substraction"
def PyInit(self):
# files to reduce
self.declareProperty(MultipleFileProperty(name="Filename",
extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
"Files to combine in reduction")
# background
self.declareProperty(FileProperty(name="Background",defaultValue="",action=FileAction.OptionalLoad,
extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
"Background run")
self.declareProperty("BackgroundScale", 1.0,
doc="The background will be scaled by this number before being subtracted.")
# Filter by TOF
self.copyProperties('LoadEventNexus', ['FilterByTofMin', 'FilterByTofMax'])
# Vanadium SA and flux
self.declareProperty("ReuseSAFlux", True, "If True then if a previous SolidAngle and Flux has been loaded "
"it will be reused otherwise it will be loaded.")
self.declareProperty(FileProperty(name="SolidAngle",defaultValue="",action=FileAction.Load,
extensions=[".nxs"]),
doc="An input workspace containing momentum integrated vanadium (a measure "
"of the solid angle). See :ref:`MDNormSCDPreprocessIncoherent <algm-MDNormSCDPreprocessIncoherent>` "
"for details")
self.declareProperty(FileProperty(name="Flux",defaultValue="",action=FileAction.Load,
extensions=[".nxs"]),
"An input workspace containing momentum dependent flux. See :ref:`MDnormSCD <algm-MDnormSCD>` for details")
self.declareProperty('MomentumMin', Property.EMPTY_DBL,
doc="Minimum value in momentum. The max of this value and the flux momentum minimum will be used.")
self.declareProperty('MomentumMax', Property.EMPTY_DBL,
doc="Maximum value in momentum. The min of this value and the flux momentum maximum will be used.")
# UBMatrix
self.declareProperty(MultipleFileProperty(name="UBMatrix",
extensions=[".mat", ".ub", ".txt"]),
doc="Path to an ISAW-style UB matrix text file. See :ref:`LoadIsawUB <algm-LoadIsawUB>`")
# Goniometer
self.declareProperty('SetGoniometer', False, "Set which Goniometer to use. See :ref:`SetGoniometer <algm-SetGoniometer>`")
condition = VisibleWhenProperty("SetGoniometer", PropertyCriterion.IsNotDefault)
self.copyProperties('SetGoniometer', ['Goniometers', 'Axis0', 'Axis1', 'Axis2'])
self.setPropertySettings("Goniometers", condition)
self.setPropertySettings('Axis0', condition)
self.setPropertySettings('Axis1', condition)
self.setPropertySettings('Axis2', condition)
self.declareProperty(FloatArrayProperty('OmegaOffset', [], direction=Direction.Input),
doc="Offset to apply to the omega rotation of the Goniometer. Need to provide one value for every run.")
# Corrections
self.declareProperty(FileProperty(name="LoadInstrument",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".xml"]),
"Load a different instrument IDF onto the data from a file. See :ref:`LoadInstrument <algm-LoadInstrument>`")
self.declareProperty(ITableWorkspaceProperty("ApplyCalibration", '',
optional=PropertyMode.Optional,
direction=Direction.Input),
doc='Calibration will be applied using this TableWorkspace using '
':ref:`ApplyCalibration <algm-ApplyCalibration>`.')
self.declareProperty(FileProperty(name="DetCal",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".detcal"]),
"Load an ISAW DetCal calibration onto the data from a file. "
"See :ref:`LoadIsawDetCal <algm-LoadIsawDetCal>`")
self.declareProperty(MatrixWorkspaceProperty("CopyInstrumentParameters", '',
optional=PropertyMode.Optional,
direction=Direction.Input),
doc='The input workpsace from which :ref:`CopyInstrumentParameters <algm-CopyInstrumentParameters>` '
'will copy parameters to data')
self.declareProperty(FileProperty(name="MaskFile",defaultValue="",action=FileAction.OptionalLoad,
extensions=[".xml",".msk"]),
"Masking file for masking. Supported file format is XML and ISIS ASCII. See :ref:`LoadMask <algm-LoadMask>`")
self.copyProperties('MDNorm', ['SymmetryOperations'])
self.declareProperty(FloatArrayProperty('QDimension0', [1, 0, 0], FloatArrayLengthValidator(3), direction=Direction.Input),
"The first Q projection axis")
self.declareProperty(FloatArrayProperty('QDimension1', [0, 1, 0], FloatArrayLengthValidator(3), direction=Direction.Input),
"The second Q projection axis")
self.declareProperty(FloatArrayProperty('QDimension2', [0, 0, 1], FloatArrayLengthValidator(3), direction=Direction.Input),
"The third Q projection axis")
self.copyProperties('MDNorm', ['Dimension0Binning', 'Dimension1Binning', 'Dimension2Binning'])
self.declareProperty('KeepTemporaryWorkspaces', False,
"If True the normalization and data workspaces in addition to the normalized data will be outputted")
self.declareProperty(WorkspaceProperty("OutputWorkspace", "",
optional=PropertyMode.Mandatory,
direction=Direction.Output),
"Output Workspace. If background is subtracted _data and _background workspaces will also be made.")
# Background
self.setPropertyGroup("Background","Background")
self.setPropertyGroup("BackgroundScale","Background")
# Vanadium
self.setPropertyGroup("ReuseSAFlux","Vanadium")
self.setPropertyGroup("SolidAngle","Vanadium")
self.setPropertyGroup("Flux","Vanadium")
self.setPropertyGroup("MomentumMin","Vanadium")
self.setPropertyGroup("MomentumMax","Vanadium")
# Goniometer
self.setPropertyGroup("SetGoniometer","Goniometer")
self.setPropertyGroup("Goniometers","Goniometer")
self.setPropertyGroup("Axis0","Goniometer")
self.setPropertyGroup("Axis1","Goniometer")
self.setPropertyGroup("Axis2","Goniometer")
self.setPropertyGroup("OmegaOffset","Goniometer")
# Corrections
self.setPropertyGroup("LoadInstrument","Corrections")
self.setPropertyGroup("ApplyCalibration","Corrections")
self.setPropertyGroup("DetCal","Corrections")
self.setPropertyGroup("CopyInstrumentParameters","Corrections")
self.setPropertyGroup("MaskFile","Corrections")
# Projection and binning
self.setPropertyGroup("QDimension0","Projection and binning")
self.setPropertyGroup("QDimension1","Projection and binning")
self.setPropertyGroup("QDimension2","Projection and binning")
self.setPropertyGroup("Dimension0Binning","Projection and binning")
self.setPropertyGroup("Dimension1Binning","Projection and binning")
self.setPropertyGroup("Dimension2Binning","Projection and binning")
def validateInputs(self):
issues = {}
UBs = self.getProperty("UBMatrix").value
Omega = self.getProperty("OmegaOffset").value
runs = self.getProperty("Filename").value
if not (len(UBs) == 1 or len(UBs) == len(runs)):
issues["UBMatrix"] = "Must provide one matrix, or a separate UB matrix for every run"
if not (len(Omega) == 0 or len(Omega) == len(runs)):
issues["OmegaOffset"] = "Must be either empty or provide one value for every run"
return issues
def PyExec(self):
# remove possible old temp workspaces
[DeleteWorkspace(ws) for ws in self.temp_workspace_list if mtd.doesExist(ws)]
_background = bool(self.getProperty("Background").value)
self._load_inst = bool(self.getProperty("LoadInstrument").value)
self._apply_cal = bool(self.getProperty("ApplyCalibration").value)
self._detcal = bool(self.getProperty("DetCal").value)
self._copy_params = bool(self.getProperty("CopyInstrumentParameters").value)
_masking = bool(self.getProperty("MaskFile").value)
_outWS_name = self.getPropertyValue("OutputWorkspace")
_UB = self.getProperty("UBMatrix").value
if len(_UB) == 1:
_UB = np.tile(_UB, len(self.getProperty("Filename").value))
_offsets = self.getProperty("OmegaOffset").value
if len(_offsets) == 0:
_offsets = np.zeros(len(self.getProperty("Filename").value))
if self.getProperty("ReuseSAFlux").value and mtd.doesExist('__sa') and mtd.doesExist('__flux'):
logger.notice("Reusing previously loaded SolidAngle and Flux workspaces. "
"Set ReuseSAFlux to False if new files are selected or you change the momentum range.")
else:
logger.notice("Loading SolidAngle and Flux from file")
LoadNexus(Filename=self.getProperty("SolidAngle").value, OutputWorkspace='__sa')
LoadNexus(Filename=self.getProperty("Flux").value, OutputWorkspace='__flux')
if _masking:
LoadMask(Instrument=mtd['__sa'].getInstrument().getName(),
InputFile=self.getProperty("MaskFile").value,
OutputWorkspace='__mask')
MaskDetectors(Workspace='__sa',MaskedWorkspace='__mask')
DeleteWorkspace('__mask')
self.XMin = mtd['__sa'].getXDimension().getMinimum()
self.XMax = mtd['__sa'].getXDimension().getMaximum()
newXMin = self.getProperty("MomentumMin").value
newXMax = self.getProperty("MomentumMax").value
if newXMin != Property.EMPTY_DBL or newXMax != Property.EMPTY_DBL:
if newXMin != Property.EMPTY_DBL:
self.XMin = max(self.XMin, newXMin)
if newXMax != Property.EMPTY_DBL:
self.XMax = min(self.XMax, newXMax)
logger.notice("Using momentum range {} to {} A^-1".format(self.XMin, self.XMax))
CropWorkspace(InputWorkspace='__flux',OutputWorkspace='__flux',XMin=self.XMin,XMax=self.XMax)
for spectrumNumber in range(mtd['__flux'].getNumberHistograms()):
Y = mtd['__flux'].readY(spectrumNumber)
mtd['__flux'].setY(spectrumNumber,(Y-Y.min())/(Y.max()-Y.min()))
MinValues = [-self.XMax*2]*3
MaxValues = [self.XMax*2]*3
if _background:
self.load_file_and_apply(self.getProperty("Background").value, '__bkg', 0)
progress = Progress(self, 0.0, 1.0, len(self.getProperty("Filename").value))
for n, run in enumerate(self.getProperty("Filename").value):
logger.notice("Working on " + run)
self.load_file_and_apply(run, '__run', _offsets[n])
LoadIsawUB('__run', _UB[n])
ConvertToMD(InputWorkspace='__run',
OutputWorkspace='__md',
QDimensions='Q3D',
dEAnalysisMode='Elastic',
Q3DFrames='Q_sample',
MinValues=MinValues,
MaxValues=MaxValues)
RecalculateTrajectoriesExtents(InputWorkspace= '__md', OutputWorkspace='__md')
MDNorm(InputWorkspace='__md',
FluxWorkspace='__flux',
SolidAngleWorkspace='__sa',
OutputDataWorkspace='__data',
TemporaryDataWorkspace='__data' if mtd.doesExist('__data') else None,
OutputNormalizationWorkspace='__norm',
TemporaryNormalizationWorkspace='__norm' if mtd.doesExist('__norm') else None,
OutputWorkspace=_outWS_name,
QDimension0=self.getProperty('QDimension0').value,
QDimension1=self.getProperty('QDimension1').value,
QDimension2=self.getProperty('QDimension2').value,
Dimension0Binning=self.getProperty('Dimension0Binning').value,
Dimension1Binning=self.getProperty('Dimension1Binning').value,
Dimension2Binning=self.getProperty('Dimension2Binning').value,
SymmetryOperations=self.getProperty('SymmetryOperations').value)
DeleteWorkspace('__md')
if _background:
# Set background Goniometer and UB to be the same as data
CopySample(InputWorkspace='__run',OutputWorkspace='__bkg',
CopyName=False,CopyMaterial=False,CopyEnvironment=False,CopyShape=False,
CopyLattice=True)
mtd['__bkg'].run().getGoniometer().setR(mtd['__run'].run().getGoniometer().getR())
ConvertToMD(InputWorkspace='__bkg',
OutputWorkspace='__bkg_md',
QDimensions='Q3D',
dEAnalysisMode='Elastic',
Q3DFrames='Q_sample',
MinValues=MinValues,
MaxValues=MaxValues)
RecalculateTrajectoriesExtents(InputWorkspace= '__bkg_md', OutputWorkspace='__bkg_md')
MDNorm(InputWorkspace='__bkg_md',
FluxWorkspace='__flux',
SolidAngleWorkspace='__sa',
OutputDataWorkspace='__bkg_data',
TemporaryDataWorkspace='__bkg_data' if mtd.doesExist('__bkg_data') else None,
OutputNormalizationWorkspace='__bkg_norm',
TemporaryNormalizationWorkspace='__bkg_norm' if mtd.doesExist('__bkg_norm') else None,
OutputWorkspace='__normalizedBackground',
QDimension0=self.getProperty('QDimension0').value,
QDimension1=self.getProperty('QDimension1').value,
QDimension2=self.getProperty('QDimension2').value,
Dimension0Binning=self.getProperty('Dimension0Binning').value,
Dimension1Binning=self.getProperty('Dimension1Binning').value,
Dimension2Binning=self.getProperty('Dimension2Binning').value,
SymmetryOperations=self.getProperty('SymmetryOperations').value)
DeleteWorkspace('__bkg_md')
progress.report()
DeleteWorkspace('__run')
if _background:
# outWS = data / norm - bkg_data / bkg_norm * BackgroundScale
CreateSingleValuedWorkspace(OutputWorkspace='__scale', DataValue=self.getProperty('BackgroundScale').value)
MultiplyMD(LHSWorkspace='__normalizedBackground',
RHSWorkspace='__scale',
OutputWorkspace='__normalizedBackground')
DeleteWorkspace('__scale')
MinusMD(LHSWorkspace=_outWS_name,RHSWorkspace='__normalizedBackground',OutputWorkspace=_outWS_name)
if self.getProperty('KeepTemporaryWorkspaces').value:
RenameWorkspaces(InputWorkspaces=['__data','__norm','__bkg_data','__bkg_norm'],
WorkspaceNames=[_outWS_name+'_data', _outWS_name+'_normalization',
_outWS_name+'_background_data',_outWS_name+'_background_normalization'])
else:
if self.getProperty('KeepTemporaryWorkspaces').value:
RenameWorkspaces(InputWorkspaces=['__data','__norm'],
WorkspaceNames=[_outWS_name+'_data', _outWS_name+'_normalization'])
self.setProperty("OutputWorkspace", mtd[_outWS_name])
# remove temp workspaces
[DeleteWorkspace(ws) for ws in self.temp_workspace_list if mtd.doesExist(ws)]
def load_file_and_apply(self, filename, ws_name, offset):
Load(Filename=filename,
OutputWorkspace=ws_name,
FilterByTofMin=self.getProperty("FilterByTofMin").value,
FilterByTofMax=self.getProperty("FilterByTofMax").value)
if self._load_inst:
LoadInstrument(Workspace=ws_name, Filename=self.getProperty("LoadInstrument").value, RewriteSpectraMap=False)
if self._apply_cal:
ApplyCalibration(Workspace=ws_name, PositionTable=self.getProperty("ApplyCalibration").value)
if self._detcal:
LoadIsawDetCal(InputWorkspace=ws_name, Filename=self.getProperty("DetCal").value)
if self._copy_params:
CopyInstrumentParameters(OutputWorkspace=ws_name, InputWorkspace=self.getProperty("CopyInstrumentParameters").value)
MaskDetectors(Workspace=ws_name,MaskedWorkspace='__sa')
if offset != 0:
if self.getProperty('SetGoniometer').value:
SetGoniometer(Workspace=ws_name,
Goniometers=self.getProperty('Goniometers').value,
Axis0='{},0,1,0,1'.format(offset),
Axis1=self.getProperty('Axis0').value,
Axis2=self.getProperty('Axis1').value,
Axis3=self.getProperty('Axis2').value)
else:
SetGoniometer(Workspace=ws_name,
Axis0='{},0,1,0,1'.format(offset),
Axis1='omega,0,1,0,1',
Axis2='chi,0,0,1,1',
Axis3='phi,0,1,0,1')
else:
if self.getProperty('SetGoniometer').value:
SetGoniometer(Workspace=ws_name,
Goniometers=self.getProperty('Goniometers').value,
Axis0=self.getProperty('Axis0').value,
Axis1=self.getProperty('Axis1').value,
Axis2=self.getProperty('Axis2').value)
ConvertUnits(InputWorkspace=ws_name,OutputWorkspace=ws_name,Target='Momentum')
CropWorkspaceForMDNorm(InputWorkspace=ws_name,OutputWorkspace=ws_name,XMin=self.XMin,XMax=self.XMax)
AlgorithmFactory.subscribe(SingleCrystalDiffuseReduction)
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SingleCrystalDiffuseReduction.py
|
Python
|
gpl-3.0
| 21,263 | 0.007149 |
# -*- coding: utf-8 -*-
from troubleshooting.framework.modules.manager import ManagerFactory
from troubleshooting.framework.variable.variable import *
from troubleshooting.framework.libraries.baseList import list2stringAndFormat
from troubleshooting.framework.libraries.system import createDir
from troubleshooting.framework.modules.configuration import ConfigManagerInstance
import time
import os,sys
from htmltemplate import *
import re
class html(object):
def __init__(self):
super(html,self).__init__()
self.caseResult = ManagerFactory().getManager(LAYER.Case).case_record
self.currenttime = time.strftime("%Y-%m-%d %X %Z",time.localtime())
def write(self):
data = ""
data += HTML_BEFORE
data += HTML_HEAD
data +="""
<body bgcolor = "#E9EAEE">
<h1 align="center">TroubleShooting Framework Report</h1>
<p><i>%s</i></p>
<table width="100%%" border="2" class="bordered">
<thead>
<tr ><th width="15%%">CaseName</th><th width="5%%" >Status</th><th width="80%%">Attribute</th></tr>
</thead>
<tbody>
"""%(self.currenttime,)
recovery_id = 1
for i,caseName in enumerate(self.caseResult):
i += 1
caseStatus = self.caseResult[caseName]["STATUS"]
DESCRIPTION = self.caseResult[caseName]["DESCRIPTION"]
REFERENCE = self.caseResult[caseName]["REFERENCE"]
REFERENCEHtml = '<a href="%s">reference document</>'%REFERENCE if REFERENCE else '<font color="#d0d0d0">NA</font>'
TAGS = self.caseResult[caseName]["TAGS"]
TESTPOINT = self.caseResult[caseName]["TESTPOINT"]
parent_pass = """
<tr bgcolor="#53C579" class="parent" id="row_0%s"><td colspan="1">%s</td><td>PASS</td><td colspan="1"></td></tr>"""%(i,caseName,)
parent_fail = """
<tr bgcolor="#FF3030" class="parent" id="row_0%s"><td colspan="1">%s</td><td>FAIL</td><td colspan="1"></td></tr>"""%(i,caseName,)
parent_warn = """
<tr bgcolor="#FF7F00" class="parent" id="row_0%s"><td colspan="1">%s</td><td>WARN</td><td colspan="1"></td></tr>"""%(i,caseName,)
if caseStatus:
data += parent_pass
else:
_level = self.caseResult[caseName]["LEVEL"]
if _level is LEVEL.CRITICAL:
data += parent_fail
else:
data += parent_warn
data += """
<tr class="child_row_0%s" style="display:none"><td>Description</td><td></td><td>%s</td></tr>
<tr class="child_row_0%s" style="display:none"><td>Reference</td><td></td><td>%s</td></tr>
<tr class="child_row_0%s" style="display:none"><td>Tags</td><td></td><td>%s</td></tr>
"""%(i,DESCRIPTION,i,REFERENCEHtml,i,TAGS)
data += """
<tr class="child_row_0%s" style="display:none">
<td colspan="3" >
<table border="1" width="100%%" style="margin:0px">
"""%i
data += """
<tr>
<th width="5%%">
<b>TestPoint</b>
</th>
<th width="5%%">
<b>Status</b>
</th>
<th width="5%%">
<b>Level</b>
</th>
<th width="15%%" name="nolog">
<b>Impact</b>
</th>
<th width="35%%" name="nolog">
<b>Root Cause</b>
</th>
<th width="15%%" name="nolog">
<b>Fix Method</b>
</th>
<th width="20%%" name="nolog">
<b>Auto Fix Method</b>
</th>
<th style="display:none;" width="85%%" name="log">
<b>LOG</b>
</th>
</tr>
"""
for testpoint in TESTPOINT:
testpointStatus = TESTPOINT[testpoint]["STATUS"]
testpointStatusHtml = '<font color="green"><b><i>%s</i></b></font>' % STATUS.PASS.value.lower() if testpointStatus else '<font color="red"><b><i>%s</i></b></font>' % STATUS.FAIL.value.lower()
testpointImpact = TESTPOINT[testpoint]["IMPACT"]
testpointImpact = list2stringAndFormat(testpointImpact)
if not testpointImpact:
testpointImpact = '<font color="#d0d0d0">NA</font>'
testpointImpactHtml = testpointImpact.replace("\n","</br>")
testpointLevel = TESTPOINT[testpoint]["LEVEL"]
testpointLevelHtml = testpointLevel.value
testpointDescribe = TESTPOINT[testpoint]["DESCRIBE"]
testpointRCA = TESTPOINT[testpoint]["RCA"]
testpointRCA = list2stringAndFormat(testpointRCA)
if not testpointRCA:
testpointRCA = '<font color="#d0d0d0">NA</font>'
testpointRCAHtml = testpointRCA.replace("\n","</br>")
testpointFIXSTEP = TESTPOINT[testpoint]["FIXSTEP"]
testpointFIXSTEP = list2stringAndFormat(testpointFIXSTEP)
if not testpointFIXSTEP:
testpointFIXSTEP = '<font color="#d0d0d0">NA</font>'
testpointFIXSTEPHtml = testpointFIXSTEP.replace("\n","</br>")
testpointAutoFixStep = TESTPOINT[testpoint]["AUTOFIXSTEP"]
if not testpointAutoFixStep:
testpointAutoFixStep = '<font color="#d0d0d0">NA</font>'
else:
if ConfigManagerInstance.config["Host"]:
reportHash = ConfigManagerInstance.config["__ReportHash__"]
reportName = ConfigManagerInstance.config["__ReportName__"]
host = ConfigManagerInstance.config["Host"]
port = ConfigManagerInstance.config["Port"]
user = ConfigManagerInstance.config["User"]
password = ConfigManagerInstance.config["Password"]
cwd =ConfigManagerInstance.config["__ProjectCWD__"]
recovery = {"ProjectDir":cwd,"Host":host,"Port":port,"User":user,"Password":password,"Recovery":",".join(testpointAutoFixStep)}
testpointAutoFixStep = """
<iframe scrolling="no" src="/www/iframe/growl-genie.html?recovery=%s&reportHash=%s&reportName=%s"></iframe>
"""%(recovery,reportHash,reportName)
testpointAutoFixStepHtml = testpointAutoFixStep
testpointLog = TESTPOINT[testpoint]["LOG"]
testpointLogHtml = testpointLog
pattern = re.compile(r"\<.+\>")
match = pattern.finditer(testpointLog)
if match:
for m in match:
className = m.group()
testpointLogHtml = testpointLogHtml.replace(className,'<font color="#FFB90F">%s</font>'%className)
testpointLogHtml = testpointLogHtml.replace("\n", "</br>")
testpointTimeout = TESTPOINT[testpoint]["TIMEOUT"]
testpointCost = TESTPOINT[testpoint]["COST"]
testpointHtml = '<i title="Timeout: %s\nCostTime: %s">%s<i>'%(testpointTimeout,testpointCost,testpoint.strip("{}"))
attribute = """
<tr>
<td>
<i>%s</i>
</td>
<td>
<i>%s</i>
</td>
<td>
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td name="nolog">
<i>%s</i>
</td>
<td style="display:none" name="log">
<i>%s</i>
</td>
</tr>
"""%(testpointHtml,testpointStatusHtml,testpointLevelHtml,testpointImpactHtml,testpointRCAHtml,testpointFIXSTEPHtml,testpointAutoFixStepHtml,testpointLogHtml)
data += attribute
data += """
</table>
</td>
</tr>
"""
data += """
</tbody>
</table>
"""
data += BUTTON
# data += HTML_LOG
data += BODY_AFTER
data += HTML_AFTER
reportDir = os.path.dirname(ConfigManagerInstance.config["Report"])
createDir(reportDir)
reportPath = ConfigManagerInstance.config["Report"]
with open(reportPath,"w") as f:
f.write(data)
|
gaoxiaofeng/troubleShooting
|
src/troubleshooting/framework/output/writehtml.py
|
Python
|
apache-2.0
| 9,570 | 0.009613 |
import unittest
from nose2 import events, loader, session
from nose2.plugins.loader import functions
from nose2.tests._common import TestCase
class TestFunctionLoader(TestCase):
def setUp(self):
self.session = session.Session()
self.loader = loader.PluggableTestLoader(self.session)
self.plugin = functions.Functions(session=self.session)
def test_can_load_test_functions_from_module(self):
class Mod(object):
pass
def test():
pass
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 1)
assert isinstance(event.extraTests[0], unittest.FunctionTestCase)
def test_ignores_generator_functions(self):
class Mod(object):
pass
def test():
yield
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 0)
def test_ignores_functions_that_take_args(self):
class Mod(object):
pass
def test(a):
pass
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 0)
|
ptthiem/nose2
|
nose2/tests/unit/test_functions_loader.py
|
Python
|
bsd-2-clause
| 1,464 | 0 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("_orange", ["_orange.pyx"],
include_dirs=[np.get_include()]
)]
)
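# Build note (standard distutils/Cython usage; nothing specific to this repo is
# assumed): the extension is typically compiled in place with
#
#     python setup.py build_ext --inplace
#
# after which `import _orange` picks up the compiled module.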
|
PythonCharmers/orange3
|
Orange/src/setup.py
|
Python
|
gpl-3.0
| 324 | 0.018519 |
__author__ = 'wangp11'
AA=1
l1 = [13, 13]
n = 1
def print_l1():
print "id A.py: %d" % id(l1)
print l1
def extend_l1():
l1.extend([1,31,31])
print l1
def print_n():
print n
|
peter-wangxu/python_play
|
test/A.py
|
Python
|
apache-2.0
| 195 | 0.035897 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Follow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('follow_key', models.CharField(max_length=255)),
('published', models.BooleanField(default=True)),
                ('client_domain', models.CharField(max_length=100, null=True, verbose_name='Client Domain', blank=True)),
('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
('user', models.ForeignKey(related_name='follows', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Follow',
'verbose_name_plural': 'Follows',
},
),
migrations.AlterUniqueTogether(
name='follow',
unique_together=set([('user', 'follow_key')]),
),
]
|
lafactura/datea-api
|
datea_api/apps/follow/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 1,483 | 0.004046 |
#!/usr/bin/env python
# octavo-admin
# An administrative script for our bookclub
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sun Mar 30 20:26:20 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: octavo-admin.py [] benjamin@bengfort.com $
"""
An administrative script for our bookclub
"""
##########################################################################
## Imports
##########################################################################
import os
import sys
import argparse
##########################################################################
## Command Line Variables
##########################################################################
DESCRIPTION = "An administrative utility for the Science bookclub"
EPILOG = "If there are any bugs or concerns, please comment on Github"
##########################################################################
## Main Method
##########################################################################
def main(*argv):
# Construct the argument parser
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
# Add command line arguments
# TODO
# Parse the arguments from the commandline
options = parser.parse_args()
if __name__ == '__main__':
main(*sys.argv)
|
DistrictDataLabs/science-bookclub
|
bin/octavo-admin.py
|
Python
|
apache-2.0
| 1,356 | 0.004425 |
from rest_framework import (
serializers,
viewsets,
)
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Author, Book
class BookSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Book
fields = (
'url',
'author',
'title',
)
class AuthorSerializer(serializers.HyperlinkedModelSerializer):
books = BookSerializer(many=True, read_only=True)
class Meta:
model = Author
fields = (
'url',
'name',
'books',
)
class AuthorViewSet(viewsets.ModelViewSet):
serializer_class = AuthorSerializer
queryset = Author.objects.all()
class BookViewSet(viewsets.ModelViewSet):
serializer_class = BookSerializer
queryset = Book.objects.all()
@api_view(['GET', 'POST'])
def echo(request):
return Response({
'GET': request.GET,
'POST': request.POST,
'META': request.META,
})
@api_view(['POST'])
def upload_file(request):
file = request.FILES['file']
return Response({
'name': file.name,
'content': file.read().decode('utf-8'),
})
|
rockymeza/django-local-requests
|
tests/views.py
|
Python
|
bsd-2-clause
| 1,217 | 0 |
from .. import Provider as CurrencyProvider
class Provider(CurrencyProvider):
# Format: (code, name)
# See currency names in Russian: https://ru.wikipedia.org/wiki/Список_существующих_валют#Валюты
currencies = (
("AED", "Дирхам ОАЭ"),
("AFN", "Афгани"),
("ALL", "Лек"),
("AMD", "Армянский драм"),
("ANG", "Нидерландский антильский гульден"),
("AOA", "Кванза"),
("ARS", "Аргентинское песо"),
("AUD", "Австралийский доллар"),
("AWG", "Арубанский флорин"),
("AZN", "Азербайджанский манат"),
("BAM", "Конвертируемая марка Боснии и Герцеговины"),
("BBD", "Барбадосский доллар"),
("BDT", "Така"),
("BGN", "Болгарский лев"),
("BHD", "Бахрейнский динар"),
("BIF", "Бурундийский франк"),
("BMD", "Бермудский доллар"),
("BND", "Брунейский доллар"),
("BOB", "Боливиано"),
("BRL", "Бразильский реал"),
("BSD", "Багамский доллар"),
("BTN", "Нгултрум"),
("BWP", "Пула"),
("BYR", "Белорусский рубль"),
("BZD", "Белизский доллар"),
("CAD", "Канадский доллар"),
("CDF", "Конголезский франк"),
("CHF", "Швейцарский франк"),
("CLP", "Чилийское песо"),
("CNY", "Юань"),
("COP", "Колумбийское песо"),
("CRC", "Коста-риканский колон"),
("CUC", "Кубанское конвертируемое песо"),
("CUP", "Кубанское песо"),
("CVE", "Эскудо Кабо-Верде"),
("CZK", "Чешская крона"),
("DJF", "Франк Джибути"),
("DKK", "Датская крона"),
("DOP", "Доминиканское песо"),
("DZD", "Алжирский динар"),
("EGP", "Египетский фунт"),
("ERN", "Накфа"),
("ETB", "Эфиопский быр"),
("EUR", "Евро"),
("FJD", "Доллар Фиджи"),
("FKP", "Фунт Фолклендских островов"),
("GBP", "Фунт стерлингов"),
("GEL", "Лари"),
("GGP", "Гернсийский фунт"),
("GHS", "Ганский седи"),
("GIP", "Гибралтарский фунт"),
("GMD", "Даласи"),
("GNF", "Гвинейский франк"),
("GTQ", "Кетсаль"),
("GYD", "Гайанский доллар"),
("HKD", "Гонконгский доллар"),
("HNL", "Лемпира"),
("HRK", "Хорватская куна"),
("HTG", "Гурд"),
("HUF", "Форинт"),
("IDR", "Индонезийская рупия"),
("ILS", "Новый израильский шекель"),
("NIS", "Новый израильский шекель"),
("IMP", "Фунт острова Мэн"),
("INR", "Индийская рупия"),
("IQD", "Иракский динар"),
("IRR", "Иранский риал"),
("ISK", "Исландская крона"),
("JEP", "Джерсийский фунт"),
("JMD", "Ямайский доллар"),
("JOD", "Иорданский динар"),
("JPY", "Иена"),
("KES", "Кенийский шиллинг"),
("KGS", "Сом"),
("KHR", "Риель"),
("KMF", "Франк Комор"),
("KPW", "Северокорейская вона"),
("KRW", "Южнокорейская вона"),
("KWD", "Кувейтский динар"),
("KYD", "Доллар Островов Кайман"),
("KZT", "Тенге"),
("LAK", "Кип"),
("LBP", "Ливийский фунт"),
("LKR", "Шри-ланкийская рупия"),
("LRD", "Либерийский доллар"),
("LSL", "Лоти"),
("LTL", "Литовский лит"),
("LYD", "Ливийский динар"),
("MAD", "Марокканский дирхам"),
("MDL", "Молдавский лей"),
("MGA", "Малагасийский ариари"),
("MKD", "Денар"),
("MMK", "Кьят"),
("MNT", "Тугрик"),
("MOP", "Патака"),
("MRO", "Угия"),
("MUR", "Маврикийская рупия"),
("MVR", "Рувия"),
("MWK", "Квача"),
("MXN", "Мексиканское песо"),
("MYR", "Малайзийский ринггит"),
("MZN", "Мозамбикский метикал"),
("NAD", "Доллар Намибии"),
("NGN", "Найра"),
("NIO", "Кордоба"),
("NOK", "Норвежская крона"),
("NPR", "Непальская рупия"),
("NZD", "Новозеландский доллар"),
("OMR", "Оманский риал"),
("PAB", "Бальбоа"),
("PEN", "Соль"),
("PGK", "Кина"),
("PHP", "Филиппинское песо"),
("PKR", "Пакистанская рупия"),
("PLN", "Злотый"),
("PYG", "Гуарани"),
("QAR", "Катарский риал"),
("RON", "Румынский лей"),
("RSD", "Сербский динар"),
("RUB", "Российский рубль"),
("RWF", "Франк Руанды"),
("SAR", "Саудовский риял"),
("SBD", "Доллар Соломоновых Островов"),
("SCR", "Сейшельская рупия"),
("SDG", "Суданский фунт"),
("SEK", "Шведская крона"),
("SGD", "Сингапурский доллар"),
("SHP", "Фунт Святой Елены"),
("SLL", "Леоне"),
("SOS", "Сомалийский шиллинг"),
("SPL", "Луиджино"),
("SRD", "Суринамский доллар"),
("STD", "Добра"),
("SVC", "Сальвадорский колон"),
("SYP", "Сирийский фунт"),
("SZL", "Лилангени"),
("THB", "Бат"),
("TJS", "Сомони"),
("TMT", "Новый туркменский манат"),
("TND", "Тунисский динар"),
("TOP", "Паанга"),
("TRY", "Турецкая лира"),
("TTD", "Доллар Тринидада и Тобаго"),
("TVD", "Доллар Тувалу"),
("TWD", "Новый тайваньский доллар"),
("TZS", "Танзанийский шиллинг"),
("UAH", "Гривна"),
("UGX", "Угандийский шиллинг"),
("USD", "Доллар США"),
("UYU", "Уругвайское песо"),
("UZS", "Узбекский сум"),
("VEF", "Суверенный боливар"),
("VND", "Донг"),
("VUV", "Вату"),
("WST", "Тала"),
("XAF", "Франк КФА ВЕАС"),
("XCD", "Восточно-карибский доллар"),
("XDR", "СДР"),
("XOF", "Франк КФА ВСЕАО"),
("XPF", "Франк КФП"),
("YER", "Йеменский риал"),
("ZAR", "Рэнд"),
("ZMW", "Замбийская квача"),
("ZWD", "Доллар Зимбабве"),
)
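    # Usage sketch (hedged: this assumes the standard Faker locale API rather
    # than anything specific to this provider):
    #
    #     from faker import Faker
    #     fake = Faker('ru_RU')
    #     fake.currency()        # e.g. ('RUB', 'Российский рубль')
    #     fake.currency_name()   # e.g. 'Евро'
    #     fake.currency_code()   # e.g. 'USD'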
|
danhuss/faker
|
faker/providers/currency/ru_RU/__init__.py
|
Python
|
mit
| 7,994 | 0.00017 |
import feedparser
import time
# Create display instance on default I2C address (0x70) and bus number.
from Adafruit_LED_Backpack import AlphaNum4
display = AlphaNum4.AlphaNum4()
# Initialize the display. Must be called once before using the display.
display.begin()
#create string(s) with rss address for multiple feeds
RssAddress = "http://feeds.reuters.com/Reuters/domesticNews"
#create feed called Rss
Rss = feedparser.parse(RssAddress)
#Clear the display, then loop through all titles in the feed, scrolling each across the display
display.clear()
display.write_display()
#Loop through each title of feed
for i in Rss.entries:
#prints title to console
print (i.title)
    #reset position to beginning
pos = 0
    #Change string to uppercase for readability and add ---* buffer to beginning and end to distinguish titles
CapString = "---*" + i.title.upper() + "*---"
# Dashed line in console for aesthetics
print("----------------------------------------------------------------")
#Loop for scrolling through title
for x in range(0,len(CapString)-4):
# Print a 4 character string to the display buffer.
display.print_str(CapString[pos:pos+4])
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
display.write_display()
# Increment position. Wrap back to 0 when the end is reached.
pos += 1
if pos > len(CapString)-4:
pos = 0
# Delay for 0.15 of a second. This can be changed to speed up or slow down the scroll.
time.sleep(0.15)
# Clear out display
display.print_str(" ")
display.write_display()
|
Epikarsios/RssLEDBackpack
|
RssLED.py
|
Python
|
gpl-3.0
| 1,628 | 0.032555 |
from coalib.bearlib.aspects.meta import issubaspect, assert_aspect
class aspectlist(list):
"""
List-derived container to hold aspects.
"""
def __init__(self, seq=()):
super().__init__(map(assert_aspect, seq))
def __contains__(self, aspect):
for item in self:
if issubaspect(aspect, item):
return True
return False
|
refeed/coala
|
coalib/bearlib/aspects/collections.py
|
Python
|
agpl-3.0
| 390 | 0 |
"""Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo myname@host.example.com:.')
child.expect ('Password:')
child.sendline (mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, Shane Kerr and Thomas Kluyver. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Pexpect Copyright (c) 2010 Noah Spurrier
http://pexpect.sourceforge.net/
"""
try:
import os, sys, time
import select
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError, e:
raise ImportError (str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.5.1'
version = __version__
version_info = (2,5,1)
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnb', 'run', 'which',
'split_command_line', '__version__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a scan buffer fills before matching an expected pattern."""
PY3 = (sys.version_info[0] >= 3)
def _cast_bytes(s, enc):
if isinstance(s, unicode):
return s.encode(enc)
return s
def _cast_unicode(s, enc):
if isinstance(s, bytes):
return s.decode(enc)
return s
re_type = type(re.compile(''))
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None,
logfile=None, cwd=None, env=None, encoding='utf-8'):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudo ttys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo myname@host.example.com:.')
child.expect ('(?i)password')
child.sendline (mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run ('scp foo myname@host.example.com:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run ("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run ("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run ("ssh username@machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
    Whenever one of the patterns is seen in the command output, run() will send
    the associated response string. Note that you should put newlines in your
    string if Enter is necessary. The responses may also contain callback
    functions. Any callback is a function that takes a dictionary as an argument.
    The dictionary contains all the locals from the run() function, so you can
    access the child spawn object or any other variable defined in run()
    (event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process; otherwise run() continues until
    the next event. A callback may also return a string which will be sent to
    the child. 'extra_args' is not used directly by run(); it provides a way to
    pass data to a callback function via the locals dictionary passed to the
    callback."""
if timeout == -1:
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
encoding=encoding)
else:
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
cwd=cwd, env=env, encoding=encoding)
if events is not None:
patterns = events.keys()
responses = events.values()
else:
patterns=None # We assume that EOF or TIMEOUT will save us.
responses=None
child_result_list = []
event_count = 0
while 1:
try:
index = child.expect (patterns)
if isinstance(child.after, basestring):
child_result_list.append(child.before + child.after)
else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
child_result_list.append(child.before)
if isinstance(responses[index], basestring):
child.send(responses[index])
elif type(responses[index]) is types.FunctionType:
callback_result = responses[index](locals())
sys.stdout.flush()
if isinstance(callback_result, basestring):
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError ('The callback must be a string or function type.')
event_count = event_count + 1
except TIMEOUT, e:
child_result_list.append(child.before)
break
except EOF, e:
child_result_list.append(child.before)
break
child_result = child._empty_buffer.join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
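# Illustrative sketch (not part of the original pexpect API): how an 'events'
# callback can use the locals dictionary that run() passes to it. The command
# and prompt pattern below are assumptions chosen only for demonstration; the
# function is defined but never called, so nothing is spawned at import time.
def _example_run_with_callback():
    def answer_or_stop(d):
        # 'd' is the locals() of run(); 'event_count' and 'child' are available.
        if d['event_count'] >= 3:
            return True      # returning True stops run()
        return 'y\n'         # returning a string sends it to the child
    return run("some_interactive_command",
               events={'(?i)continue\\?': answer_or_stop})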
class spawnb(object):
"""Use this class to start and control child applications with a pure-bytes
interface."""
_buffer_type = bytes
def _cast_buffer_type(self, s):
return _cast_bytes(s, self.encoding)
_empty_buffer = b''
_pty_newline = b'\r\n'
# Some code needs this to exist, but it's mainly for the spawn subclass.
encoding = 'utf-8'
def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None):
"""This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn ('/usr/bin/ftp')
child = pexpect.spawn ('/usr/bin/ssh user@example.com')
child = pexpect.spawn ('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn ('/usr/bin/ftp', [])
child = pexpect.spawn ('/usr/bin/ssh', ['user@example.com'])
child = pexpect.spawn ('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
If you want to run a command and pipe it through another command then
you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > log_list.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is the maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
The searchwindowsize attribute sets how far back in the incoming
search buffer Pexpect will search for pattern matches. Every time
Pexpect reads some data from the child it will append the data to the
incoming buffer. The default is to search from the beginning of the
incoming buffer each time new data is read from the child. But this is
very inefficient if you are running a command that generates a large
amount of data where you want to match. The searchwindowsize does not
affect the size of the incoming data buffer. You will still have
access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. """
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
self.status = None # status returned by os.waitpid
self.flag_eof = False
self.pid = None
self.child_fd = -1 # initially closed
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
self.logfile_read = None # input from child (read_nonblocking)
self.logfile_send = None # output to send (send, sendline)
self.maxread = maxread # max bytes to read at one time into buffer
self.buffer = self._empty_buffer # This is the read buffer. See maxread.
self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
# Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
self.softspace = False # File-like object.
self.name = '<' + repr(self) + '>' # File-like object.
self.closed = True # File-like object.
self.cwd = cwd
self.env = env
self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
# Solaris uses internal __fork_pty(). All others use pty.fork().
if 'solaris' in sys.platform.lower() or 'sunos5' in sys.platform.lower():
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# allow dummy instances for subclasses that may not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn (command, args)
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. """
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
# -- Fernando Perez
try:
self.close()
except:
pass
def __str__(self):
"""This returns a human-readable string that represents the state of
the object. """
s = []
s.append(repr(self))
s.append('version: ' + __version__)
s.append('command: ' + str(self.command))
s.append('args: ' + str(self.args))
s.append('searcher: ' + str(self.searcher))
s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
s.append('before (last 100 chars): ' + str(self.before)[-100:])
s.append('after: ' + str(self.after))
s.append('match: ' + str(self.match))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self,command,args=[]):
"""This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. """
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if type(command) == type(0):
raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
if type (args) != type([]):
raise TypeError ('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
self.args = args[:] # work with a copy
self.args.insert (0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join (self.args) + '>'
assert self.pid is None, 'The pid member should be None.'
assert self.command is not None, 'The command member should not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError, e:
raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
else: # Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0: # Child
try:
self.child_fd = sys.stdout.fileno() # used by setwinsize()
self.setwinsize(24, 80)
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range (3, max_fd):
try:
os.close (i)
except OSError:
pass
# I don't know why this works, but ignoring SIGHUP fixes a
# problem when trying to start a Java daemon with sudo
# (specifically, Tomcat).
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
"""This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
"""
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
pid = os.fork()
if pid < 0:
raise ExceptionPexpect, "Error! Failed os.fork()."
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
"""This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. """
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
except:
# Already disconnected. This happens if running inside cron.
pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
raise ExceptionPexpect, "Error! Failed to disconnect from controlling tty. It is still possible to open /dev/tty."
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
else:
os.close(fd)
def fileno (self): # File-like object.
"""This returns the file descriptor of the pty for the child.
"""
return self.child_fd
def close (self, force=True): # File-like object.
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose) # Give kernel time to update process status.
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ('close() could not terminate the child using terminate()')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush (self): # File-like object.
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty (self): # File-like object.
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. """
return os.isatty(self.child_fd)
def waitnoecho (self, timeout=-1):
"""This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn ('ssh user@example.com')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
If timeout==None then this method will block until the ECHO flag is False.
"""
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho (self):
"""This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). """
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho (self, state):
"""This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.expect (['1234'])
p.expect (['1234'])
p.setecho(False) # Turn off tty echo
p.sendline ('abcd') # We will see this only once (echoed by cat).
p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['abcd'])
p.expect (['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.setecho(False) # Turn off tty echo
p.sendline ('abcd') # We will see this only once (echoed by cat).
p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['1234'])
p.expect (['1234'])
p.expect (['abcd'])
p.expect (['wxyz'])
"""
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
# and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def read_nonblocking (self, size = 1, timeout = -1):
"""This reads at most size bytes from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a log file was set using
setlog() then all data will also be written to the log file.
If timeout is None then the read may block indefinitely. If timeout is -1
then the self.timeout value is used. If timeout is 0 then the child is
polled and if there was no data immediately ready then this will raise
a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. """
if self.closed:
raise ValueError ('I/O operation on closed file in read_nonblocking().')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
if not r:
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
elif self.__irix_hack:
# This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
# This adds a 2 second delay, but only when the child is terminated.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
r,w,e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their processes are alive;
# then timeout on the select; and then finally admit that they are not alive.
self.flag_eof = True
raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
else:
raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError, e: # Linux does this
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
if s == b'': # BSD style
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
s2 = self._cast_buffer_type(s)
if self.logfile is not None:
self.logfile.write(s2)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write(s2)
self.logfile_read.flush()
return s
raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
def read (self, size = -1): # File-like object.
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return self._empty_buffer
if size < 0:
self.expect (self.delimiter) # delimiter default is EOF
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
if self._buffer_type is bytes:
pat = (u'.{%d}' % size).encode('ascii')
else:
pat = u'.{%d}' % size
cre = re.compile(pat, re.DOTALL)
index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.after ### self.before should be ''. Should I assert this?
return self.before
def readline(self, size = -1):
"""This reads and returns one entire line. A trailing newline is kept
in the string, but may be absent when a file ends with an incomplete
line. Note: This readline() looks for a \\r\\n pair even on UNIX
because this is what the pseudo tty device returns. So contrary to what
you may expect you will receive the newline as \\r\\n. An empty string
is returned when EOF is hit immediately. Currently, the size argument is
mostly ignored, so this behavior is not standard for a file-like
object. If size is 0 then an empty string is returned. """
if size == 0:
return self._empty_buffer
index = self.expect ([self._pty_newline, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.before + self._pty_newline
return self.before
def __iter__ (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
return self
def next (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
result = self.readline()
if result == self._empty_buffer:
raise StopIteration
return result
def readlines (self, sizehint = -1): # File-like object.
"""This reads until EOF using readline() and returns a list containing
the lines thus read. The optional "sizehint" argument is ignored. """
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s): # File-like object.
"""This is similar to send() except that there is no return value.
"""
self.send (s)
def writelines (self, sequence): # File-like object.
"""This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
"""
for s in sequence:
self.write (s)
def send(self, s):
"""This sends a string to the child process. This returns the number of
bytes written. If a log file was set then the data is also written to
the log. """
time.sleep(self.delaybeforesend)
s2 = self._cast_buffer_type(s)
if self.logfile is not None:
self.logfile.write(s2)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write(s2)
self.logfile_send.flush()
c = os.write (self.child_fd, _cast_bytes(s, self.encoding))
return c
def sendline(self, s=''):
"""This is like send(), but it adds a line feed (os.linesep). This
returns the number of bytes written. """
n = self.send (s)
n = n + self.send (os.linesep)
return n
def sendcontrol(self, char):
"""This sends a control character to the child such as Ctrl-C or
Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
child.sendcontrol('g')
See also, sendintr() and sendeof().
"""
char = char.lower()
a = ord(char)
if a>=97 and a<=122:
a = a - ord('a') + 1
return self.send (chr(a))
d = {'@':0, '`':0,
'[':27, '{':27,
'\\':28, '|':28,
']':29, '}': 29,
'^':30, '~':30,
'_':31,
'?':127}
if char not in d:
return 0
return self.send (chr(d[char]))
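    # Example (illustrative, commented out so nothing runs at import time):
    #   child = spawn('some_long_running_command')   # hypothetical command
    #   child.sendcontrol('c')   # sends chr(3), i.e. Ctrl-C, per the letter rule above
    #   child.sendcontrol('[')   # sends chr(27), i.e. ESC, per the punctuation table above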
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means that, to work as expected, sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. """
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write (self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write (self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
else:
# platform does not define VEOF so assume CTRL-D
char = chr(4)
self.send(char)
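    # Example (illustrative, commented out): signalling end of input to 'cat'.
    # The sendline() before sendeof() matters -- as described above, EOF is only
    # recognized at the beginning of a line.
    #   child = spawn('cat')
    #   child.sendline('hello')
    #   child.sendeof()
    #   child.expect(EOF)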
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
if hasattr(termios, 'VINTR'):
char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
else:
# platform does not define VINTR so assume CTRL-C
char = chr(3)
self.send (char)
def eof (self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. """
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError, e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(); but, technically, the child
is still alive until its output is read. """
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect ('Cannot wait for dead child process.')
self.exitstatus = os.WEXITSTATUS(status)
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return self.exitstatus
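    # Illustrative note: because wait() never reads from the child, a child that
    # fills the pty buffer and then exits can leave wait() blocked. A common
    # pattern (a sketch, not the only correct one) is to drain output first:
    #   child = spawn('/bin/ls')
    #   child.expect(EOF)    # consume all output
    #   child.close()        # close() records exitstatus/signalstatus
    #   print child.exitstatus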
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. """
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form of waitpid to get
# status of a defunct process. This is super-lame. The flag_eof would have
# been set in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e: # No child processes
if e.errno == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# I have to do this twice for Solaris. I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process wishes to
# report, and the value of status is undefined.
if pid == 0:
try:
pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
except OSError, e: # This should never happen...
if e[0] == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# If pid is still 0 after two calls to waitpid() then
# the process really is alive. This seems to work on all platforms, except
# for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
# take care of this situation (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return False
def kill(self, sig):
"""This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. """
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def compile_pattern_list(self, patterns):
"""This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoids calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
"""
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
compile_flags = re.DOTALL # Allow dot to match \n
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for p in patterns:
if isinstance(p, (bytes, unicode)):
p = self._cast_buffer_type(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif type(p) is re_type:
p = self._prepare_regex_pattern(p)
compiled_pattern_list.append(p)
else:
raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those types. %s' % str(type(p)))
return compiled_pattern_list
def _prepare_regex_pattern(self, p):
"Recompile unicode regexes as bytes regexes. Overridden in subclass."
if isinstance(p.pattern, unicode):
p = re.compile(p.pattern.encode('utf-8'), p.flags &~ re.UNICODE)
return p
def expect(self, pattern, timeout = -1, searchwindowsize=-1):
"""This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first match
in the stream is chosen. If more than one pattern matches at that point,
the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect (['bar', 'foo', 'foobar'])
# returns 1 ('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect (['foobar', 'foo'])
# returns 0 ('foobar') if all input is available at once,
# but returns 1 ('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect (['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect (pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
"""
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT (which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. """
return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match."""
if isinstance(pattern_list, (bytes, unicode)) or pattern_list in (TIMEOUT, EOF):
pattern_list = [pattern_list]
return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
"""This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and what
to search for in the input.
See expect() for other arguments, return value and exceptions. """
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True: # Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end : ]
self.before = incoming[ : searcher.start]
self.after = incoming[searcher.start : searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if timeout is not None and timeout < 0:
raise TIMEOUT ('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking (self.maxread, timeout)
freshlen = len(c)
time.sleep (0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF, e:
self.buffer = self._empty_buffer
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF (str(e) + '\n' + str(self))
except TIMEOUT, e:
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT (str(e) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, r, c):
"""This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. """
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
TIOCSWINSZ = -2146929561 # Same bits, but with sign.
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', r, c, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
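    # Example (illustrative, commented out): telling a full-screen child that the
    # terminal is 40 rows by 100 columns, then reading the size back.
    #   child = spawn('vi')               # hypothetical curses-style application
    #   child.setwinsize(40, 100)         # child receives SIGWINCH
    #   rows, cols = child.getwinsize()   # -> (40, 100)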
def interact(self, escape_character = b'\x1d', input_filter = None, output_filter = None):
"""This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
simply echoes the child stdout and child stderr to the real stdout and
it echoes the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
"""
# Flush the buffer.
if PY3: self.stdout.write(_cast_unicode(self.buffer, self.encoding))
else: self.stdout.write(self.buffer)
self.stdout.flush()
self.buffer = self._empty_buffer
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != b'' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
"""This is used by the interact() method.
"""
return os.read(fd, 1000)
def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
"""This is used by the interact() method.
"""
while self.isalive():
r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
if output_filter: data = output_filter(data)
if self.logfile is not None:
self.logfile.write (data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter: data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select (self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise
class spawn(spawnb):
"""This is the main class interface for Pexpect. Use this class to start
and control child applications."""
_buffer_type = unicode
def _cast_buffer_type(self, s):
return _cast_unicode(s, self.encoding)
_empty_buffer = u''
_pty_newline = u'\r\n'
def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None, encoding='utf-8'):
super(spawn, self).__init__(command, args, timeout=timeout, maxread=maxread,
searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
self.encoding = encoding
def _prepare_regex_pattern(self, p):
"Recompile bytes regexes as unicode regexes."
if isinstance(p.pattern, bytes):
p = re.compile(p.pattern.decode(self.encoding), p.flags)
return p
def read_nonblocking(self, size=1, timeout=-1):
return super(spawn, self).read_nonblocking(size=size, timeout=timeout)\
.decode(self.encoding)
read_nonblocking.__doc__ = spawnb.read_nonblocking.__doc__
##############################################################################
# End of spawn class
##############################################################################
class searcher_string (object):
"""This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
"""
def __init__(self, strings):
"""This creates an instance of searcher_string. This argument 'strings'
may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in enumerate(strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (ns[0],' %d: "%s"' % ns) for ns in self._strings ]
ss.append((-1,'searcher_string:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
return '\n'.join(a[1] for a in ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen+len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
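# Illustrative sketch (not part of the original API): how expect_loop() drives a
# searcher_string instance. The buffer contents are made up for demonstration;
# the function is defined but never called.
def _example_searcher_string():
    s = searcher_string([b'password:', b'login:'])
    buf = b'Welcome\r\nlogin:'
    index = s.search(buf, freshlen=len(buf))
    # index == 1 ('login:' matched); s.start/s.end delimit the match in buf.
    return index, buf[s.start:s.end]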
class searcher_re (object):
"""This is regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
"""
def __init__(self, patterns):
"""This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types."""
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in enumerate(patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (n,' %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
ss.append((-1,'searcher_re:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
return '\n'.join(a[1] for a in ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer)-searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if n < first_match:
first_match = n
the_match = match
best_index = index
if first_match == absurd_match:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
def which (filename):
"""This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None."""
# Special case where filename already contains a path.
if os.path.dirname(filename) != '':
if os.access (filename, os.X_OK):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
f = os.path.join(path, filename)
if os.access(f, os.X_OK):
return f
return None
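# Example (illustrative, commented out; results depend on the local system):
#   which('ls')              # -> something like '/bin/ls', or None if not found
#   which('./myscript.sh')   # hypothetical path; returned as-is if executable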
def split_command_line(command_line):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
state_whitespace = 4 # The state of consuming whitespace between commands.
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\': # Escape the next character
state = state_esc
elif c == r"'": # Handle single quote
state = state_singlequote
elif c == r'"': # Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
pass # Do nothing.
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
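# Illustrative sketch (not part of the original API): expected behaviour of the
# state machine above on a few made-up command lines. Defined but never called.
def _example_split_command_line():
    assert split_command_line('ls -l /tmp') == ['ls', '-l', '/tmp']
    assert split_command_line('echo "hello world"') == ['echo', 'hello world']
    assert split_command_line('grep it\\ works') == ['grep', 'it works']
    return True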
# vi:set sr et ts=4 sw=4 ft=python :
|
mikewesner-wf/glasshouse
|
appengine/lib/invoke/vendor/pexpect/__init__.py
|
Python
|
apache-2.0
| 78,307 | 0.004572 |
from rest_framework.routers import SimpleRouter
from api.formattedmetadatarecords import views
router = SimpleRouter()
router.register(r'formattedmetadatarecords', views.FormattedMetadataRecordViewSet, basename='formattedmetadatarecord')
urlpatterns = router.urls
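# Illustrative note (assumption, not verified against the rest of the project):
# with SimpleRouter and basename='formattedmetadatarecord', DRF generates the
# route names 'formattedmetadatarecord-list' and 'formattedmetadatarecord-detail',
# e.g. reverse('formattedmetadatarecord-detail', kwargs={'pk': ...}).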
|
CenterForOpenScience/SHARE
|
api/formattedmetadatarecords/urls.py
|
Python
|
apache-2.0
| 266 | 0.003759 |
import asyncio
import datetime
import importlib
import logging
import os
import platform
import signal
import sys
import time
from typing import Any, Dict, List, Optional, Set, Union, cast
import tomodachi.__version__
import tomodachi.container
import tomodachi.importer
import tomodachi.invoker
from tomodachi.container import ServiceContainer
from tomodachi.helpers.execution_context import clear_execution_context, clear_services, set_execution_context
from tomodachi.helpers.safe_modules import SAFE_MODULES
from tomodachi.importer import ServiceImporter
CancelledError = asyncio.CancelledError
try:
asyncioexceptions = getattr(asyncio, "exceptions")
if asyncioexceptions:
_CancelledError = asyncioexceptions.CancelledError
except (Exception, ModuleNotFoundError, ImportError):
_CancelledError = asyncio.CancelledError
class ServiceLauncher(object):
_close_waiter: Optional[asyncio.Future] = None
_stopped_waiter: Optional[asyncio.Future] = None
restart_services = False
services: Set = set()
@classmethod
def run_until_complete(
cls,
service_files: Union[List, set],
configuration: Optional[Dict] = None,
watcher: Any = None,
) -> None:
def stop_services() -> None:
asyncio.ensure_future(_stop_services())
async def _stop_services() -> None:
if cls._close_waiter and not cls._close_waiter.done():
cls._close_waiter.set_result(None)
for service in cls.services:
try:
service.stop_service()
except Exception:
pass
if cls._stopped_waiter:
cls._stopped_waiter.set_result(None)
if cls._stopped_waiter:
await cls._stopped_waiter
def sigintHandler(*args: Any) -> None:
sys.stdout.write("\b\b\r")
sys.stdout.flush()
logging.getLogger("system").warning("Received <ctrl+c> interrupt [SIGINT]")
cls.restart_services = False
def sigtermHandler(*args: Any) -> None:
logging.getLogger("system").warning("Received termination signal [SIGTERM]")
cls.restart_services = False
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
if loop and loop.is_closed():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
for signame in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(getattr(signal, signame), stop_services)
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
signal.signal(signal.SIGINT, sigintHandler)
signal.signal(signal.SIGTERM, sigtermHandler)
watcher_future = None
if watcher:
async def _watcher_restart(updated_files: Union[List, set]) -> None:
cls.restart_services = True
for file in service_files:
try:
ServiceImporter.import_service_file(file)
except (SyntaxError, IndentationError) as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
cls.restart_services = False
return
pre_import_current_modules = [m for m in sys.modules.keys()]
cwd = os.getcwd()
for file in updated_files:
if file.lower().endswith(".py"):
module_name = file[:-3].replace("/", ".")
module_name_full_path = "{}/{}".format(os.path.realpath(cwd), file)[:-3].replace("/", ".")
try:
for m in pre_import_current_modules:
if m == module_name or (len(m) > len(file) and module_name_full_path.endswith(m)):
ServiceImporter.import_module(file)
except (SyntaxError, IndentationError) as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
cls.restart_services = False
return
logging.getLogger("watcher.restart").warning("Restarting services")
stop_services()
watcher_future = loop.run_until_complete(watcher.watch(loop=loop, callback_func=_watcher_restart))
cls.restart_services = True
init_modules = [m for m in sys.modules.keys()]
restarting = False
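        # Supervision loop: import and run the configured services until they
        # stop; if the file watcher (or an error) requested a restart, freshly
        # imported modules are purged further down and the loop runs again.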
while cls.restart_services:
init_timestamp = time.time()
init_timestamp_str = datetime.datetime.utcfromtimestamp(init_timestamp).isoformat() + "Z"
process_id = os.getpid()
event_loop_alias = ""
event_loop_version = ""
try:
if "uvloop." in str(loop.__class__):
event_loop_alias = "uvloop"
import uvloop # noqa # isort:skip
event_loop_version = str(uvloop.__version__)
elif "asyncio." in str(loop.__class__):
event_loop_alias = "asyncio"
else:
event_loop_alias = "{}.{}".format(loop.__class__.__module__, loop.__class__.__name__)
except Exception:
event_loop_alias = str(loop)
clear_services()
clear_execution_context()
set_execution_context(
{
"tomodachi_version": tomodachi.__version__,
"python_version": platform.python_version(),
"system_platform": platform.system(),
"process_id": process_id,
"init_timestamp": init_timestamp_str,
"event_loop": event_loop_alias,
}
)
if event_loop_alias == "uvloop" and event_loop_version:
set_execution_context(
{
"uvloop_version": event_loop_version,
}
)
if watcher:
tz: Any = None
utc_tz: Any = None
try:
import pytz # noqa # isort:skip
import tzlocal # noqa # isort:skip
utc_tz = pytz.UTC
try:
tz = tzlocal.get_localzone()
if not tz:
tz = pytz.UTC
except Exception:
tz = pytz.UTC
except Exception:
pass
init_local_datetime = (
datetime.datetime.fromtimestamp(init_timestamp)
if tz and tz is not utc_tz and str(tz) != "UTC"
else datetime.datetime.utcfromtimestamp(init_timestamp)
)
print("---")
print("Starting tomodachi services (pid: {}) ...".format(process_id))
for file in service_files:
print("* {}".format(file))
print()
print(
"Current version: tomodachi {} on Python {}".format(
tomodachi.__version__, platform.python_version()
)
)
print(
"Event loop implementation: {}{}".format(
event_loop_alias, " {}".format(event_loop_version) if event_loop_version else ""
)
)
if tz:
print("Local time: {} {}".format(init_local_datetime.strftime("%B %d, %Y - %H:%M:%S,%f"), str(tz)))
print("Timestamp in UTC: {}".format(init_timestamp_str))
print()
print("File watcher is active - code changes will automatically restart services")
print("Quit running services with <ctrl+c>")
print()
cls._close_waiter = asyncio.Future()
cls._stopped_waiter = asyncio.Future()
cls.restart_services = False
try:
cls.services = set(
[
ServiceContainer(ServiceImporter.import_service_file(file), configuration)
for file in service_files
]
)
result = loop.run_until_complete(
asyncio.wait([asyncio.ensure_future(service.run_until_complete()) for service in cls.services])
)
exception = [v.exception() for v in [value for value in result if value][0] if v.exception()]
if exception:
raise cast(Exception, exception[0])
except tomodachi.importer.ServicePackageError:
pass
except Exception as e:
logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
if isinstance(e, ModuleNotFoundError): # pragma: no cover
missing_module_name = str(getattr(e, "name", None) or "")
if missing_module_name:
color = ""
color_reset = ""
try:
import colorama # noqa # isort:skip
color = colorama.Fore.WHITE + colorama.Back.RED
color_reset = colorama.Style.RESET_ALL
except Exception:
pass
print("")
print(
"{}[fatal error] The '{}' package is missing or cannot be imported.{}".format(
color, missing_module_name, color_reset
)
)
print("")
if restarting:
logging.getLogger("watcher.restart").warning("Service cannot restart due to errors")
logging.getLogger("watcher.restart").warning("Trying again in 1.5 seconds")
loop.run_until_complete(asyncio.wait([asyncio.sleep(1.5)]))
if cls._close_waiter and not cls._close_waiter.done():
cls.restart_services = True
else:
for signame in ("SIGINT", "SIGTERM"):
loop.remove_signal_handler(getattr(signal, signame))
else:
for signame in ("SIGINT", "SIGTERM"):
loop.remove_signal_handler(getattr(signal, signame))
current_modules = [m for m in sys.modules.keys()]
for m in current_modules:
if m not in init_modules and m not in SAFE_MODULES:
del sys.modules[m]
importlib.reload(tomodachi.container)
importlib.reload(tomodachi.invoker)
importlib.reload(tomodachi.invoker.base)
importlib.reload(tomodachi.importer)
restarting = True
if watcher:
if watcher_future and not watcher_future.done():
try:
watcher_future.set_result(None)
except RuntimeError: # pragma: no cover
watcher_future.cancel()
if not watcher_future.done(): # pragma: no cover
try:
loop.run_until_complete(watcher_future)
except (Exception, CancelledError, _CancelledError):
pass
|
kalaspuff/tomodachi
|
tomodachi/launcher.py
|
Python
|
mit
| 12,014 | 0.002164 |
# a = 7 or a = 12
b = a
b -= 1
d = a
a = 0
# a += b*d
c = b
a += 1
c -= 1
while c != 0
d -= 1
while d !=0
b -= 1
c = b
# c *= 2
d = c
d -= 1
c += 1
while d != 0
tgl c
c = -16
while b > 1
c = 1
# a += 95*95
c = 95
d = 95
a += 1
d -= 1
while d != 0
c -= 1
while c != 0
|
tbjoern/adventofcode
|
Twentythree/reverse.py
|
Python
|
mit
| 331 | 0.096677 |
import itertools, os
import pandas as pd, sys
import numpy as np, matplotlib.pylab as plt
Q = 1.0 ; T = 1.0
class Game:
def __init__(self,df):
self.df = df.copy()
self.df_orig = self.df.copy()
# dictionaries of df variables - used for speedy access
self.df_capability = df.Capability.to_dict()
self.df_position = df.Position.to_dict()
self.df_salience = df.Salience.to_dict()
self.max_pos = df.Position.max()
self.min_pos = df.Position.min()
def weighted_median(self):
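        # Median of actor positions weighted by Capability*Salience: the position
        # at which the cumulative normalized weight first reaches 0.5. In this
        # family of expected-utility models that value is commonly read as the
        # predicted group outcome.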
df = self.df.copy()
df['w'] = df.Capability*df.Salience
df = df.sort_index(by='Position',ascending=True)
df['w'] = df['w'] / df['w'].sum()
df['w'] = df['w'].cumsum()
return float(df[df['w']>=0.5].head(1).Position)
def mean(self):
return (self.df.Capability*self.df.Position*self.df.Salience).sum() / \
(self.df.Capability*self.df.Salience).sum()
def Usi_i(self,i,j,ri=1.):
tmp1 = self.df_position[i]-self.df_position[j]
tmp2 = self.max_pos-self.min_pos
return 2. - 4.0 * ( (0.5-0.5*np.abs(float(tmp1)/tmp2) )**ri)
def Ufi_i(self,i,j,ri=1.):
tmp1 = self.df_position[i]-self.df_position[j]
tmp2 = self.df.Position.max()-self.df.Position.min()
return 2. - 4.0 * ( (0.5+0.5*np.abs(float(tmp1)/tmp2) )**ri )
def Usq_i(self,i,ri=1.):
return 2.-(4.*(0.5**ri))
def Ui_ij(self,i,j):
tmp1 = self.df_position[i] - self.df_position[j]
tmp2 = self.max_pos-self.min_pos
return 1. - 2.*np.abs(float(tmp1) / tmp2)
def v(self,i,j,k):
return self.df_capability[i]*self.df_salience[i]*(self.Ui_ij(i,j)-self.Ui_ij(i,k))
def Pi(self,i):
l = np.array([[i,j,k] for (j,k) in itertools.combinations(range(len(self.df)), 2 ) if i!=j and i!=k])
U_filter = np.array(map(lambda (i,j,k): self.Ui_ij(j,i)>self.Ui_ij(i,k), l))
lpos = l[U_filter]
tmp1 = np.sum(map(lambda (i,j,k): self.v(j,i,k), lpos))
tmp2 = np.sum(map(lambda (i,j,k): np.abs(self.v(j,i,k)), l))
return float(tmp1)/tmp2
def Ubi_i(self,i,j,ri=1):
tmp1 = np.abs(self.df_position[i] - self.weighted_median()) + \
np.abs(self.df_position[i] - self.df_position[j])
tmp2 = np.abs(self.max_pos-self.min_pos)
return 2. - (4. * (0.5 - (0.25 * float(tmp1) / tmp2))**ri)
def Uwi_i(self,i,j,ri=1):
tmp1 = np.abs(self.df_position[i] - self.weighted_median()) + \
np.abs(self.df_position[i] - self.df_position[j])
tmp2 = np.abs(self.max_pos-self.min_pos)
return 2. - (4. * (0.5 + (0.25 * float(tmp1) / tmp2))**ri)
def EU_i(self,i,j,r=1):
term1 = self.df_salience[j]*self.Pi(i)*self.Usi_i(i,j,r)
term2 = self.df_salience[j]*(1.-self.Pi(i))*self.Ufi_i(i,j,r)
term3 = (1-self.df_salience[j])*self.Usi_i(i,j,r)
term4 = Q*self.Usq_i(i,r)
term5 = (1.-Q)*( T*self.Ubi_i(i,j,r) + (1.-T)*self.Uwi_i(i,j,r) )
return (term1+term2+term3)-(term4+term5)
def EU_j(self,i,j,r=1):
return self.EU_i(j,i,r)
def Ri(self,i):
# get all j's expect i
l = [x for x in range(len(self.df)) if x!= i]
tmp = np.array(map(lambda x: self.EU_j(i,x), l))
numterm1 = 2*np.sum(tmp)
numterm2 = (len(self.df)-1)*np.max(tmp)
numterm3 = (len(self.df)-1)*np.min(tmp)
return float(numterm1-numterm2-numterm3) / (numterm2-numterm3)
def ri(self,i):
Ri_tmp = self.Ri(i)
return (1-Ri_tmp/3.) / (1+Ri_tmp/3.)
def do_round(self):
df_new = self.df.copy()
# reinit
self.df_capability = self.df.Capability.to_dict()
self.df_position = self.df.Position.to_dict()
self.df_salience = self.df.Salience.to_dict()
self.max_pos = self.df.Position.max()
self.min_pos = self.df.Position.min()
self.df_orig_position = self.df_orig.Position.to_dict()
offers = [list() for i in range(len(self.df))]
ris = [self.ri(i) for i in range(len(self.df))]
for (i,j) in itertools.combinations(range(len(self.df)), 2 ):
if i==j: continue
eui = self.EU_i(i,j,r=ris[i])
euj = self.EU_j(i,j,r=ris[j])
if eui > 0 and euj > 0 and np.abs(eui) > np.abs(euj):
# conflict - actor i has upper hand
j_moves = self.df_position[i]-self.df_orig_position[j]
print i,j,eui,euj,'conflict', i, 'wins', j, 'moves',j_moves
offers[j].append(j_moves)
elif eui > 0 and euj > 0 and np.abs(eui) < np.abs(euj):
# conflict - actor j has upper hand
i_moves = self.df_position[j]-self.df_orig_position[i]
print i,j,eui,euj,'conflict', j, 'wins', i, 'moves',i_moves
offers[i].append(i_moves)
elif eui > 0 and euj < 0 and np.abs(eui) > np.abs(euj):
# compromise - actor i has the upper hand
print i,j,eui,euj,'compromise', i, 'upper hand'
xhat = (self.df_position[i]-self.df_orig_position[j]) * np.abs(euj/eui)
offers[j].append(xhat)
elif eui < 0 and euj > 0 and np.abs(eui) < np.abs(euj):
# compromise - actor j has the upper hand
print i,j,eui,euj,'compromise', j, 'upper hand'
xhat = (self.df_position[j]-self.df_orig_position[i]) * np.abs(eui/euj)
offers[i].append(xhat)
elif eui > 0 and euj < 0 and np.abs(eui) < np.abs(euj):
# capitulation - actor i has upper hand
j_moves = self.df_position[i]-self.df_orig_position[j]
print i,j,eui,euj,'capitulate', i, 'wins', j, 'moves',j_moves
offers[j].append(j_moves)
elif eui < 0 and euj > 0 and np.abs(eui) > np.abs(euj):
# capitulation - actor j has upper hand
i_moves = self.df_position[j]-self.df_orig_position[i]
print i,j,eui,euj,'capitulate', j, 'wins', i, 'moves',i_moves
offers[i].append(i_moves)
else:
print i,j,eui,euj,'nothing'
# choose offer requiring minimum movement, then
# update positions
print offers
#exit()
df_new['offer'] = map(lambda x: 0 if len(x)==0 else x[np.argmin(np.abs(x))],offers)
df_new.loc[:,'Position'] = df_new.Position + df_new.offer
# in case max/min is exceeded
df_new.loc[df_new['Position']>self.max_pos,'Position'] = self.max_pos
df_new.loc[df_new['Position']<self.min_pos,'Position'] = self.min_pos
self.df = df_new
print self.df
if __name__ == "__main__":
if len(sys.argv) < 3:
print "\nUsage: run.py [CSV] [ROUNDS]"
exit()
df = pd.read_csv(sys.argv[1]); print df
df.Position = df.Position.astype(float)
df.Capability = df.Capability.astype(float)
df.Salience = df.Salience/100.
game = Game(df)
print 'weighted_median', game.weighted_median(), 'mean', game.mean()
results = pd.DataFrame(index=df.index)
for i in range(int(sys.argv[2])):
results[i] = game.df.Position
df = game.do_round(); print df
print 'weighted_median', game.weighted_median(), 'mean', game.mean()
results = results.T
results.columns = game.df.Actor
print results
results.plot()
fout = '%s/out-%s.png' % (os.environ['TEMP'],sys.argv[1].replace(".csv","-csv"))
plt.savefig(fout)
|
burakbayramli/classnotes
|
algs/algs_105_mesquita/test1.py
|
Python
|
gpl-3.0
| 7,691 | 0.021194 |
# -*- coding: utf-8 -*-
###############################################################################
#
# FunnelList
# Gets the names and funnel_ids of your funnels.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FunnelList(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FunnelList Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(FunnelList, self).__init__(temboo_session, '/Library/Mixpanel/DataExport/Funnels/FunnelList')
def new_input_set(self):
return FunnelListInputSet()
def _make_result_set(self, result, path):
return FunnelListResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FunnelListChoreographyExecution(session, exec_id, path)
class FunnelListInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FunnelList
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided my Mixpanel. You can find your Mixpanel API Key in the project settings dialog in the Mixpanel app.)
"""
super(FunnelListInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((required, string) The API Secret provided by Mixpanel. You can find your Mixpanel API Secret in the project settings dialog in the Mixpanel app.)
"""
super(FunnelListInputSet, self)._set_input('APISecret', value)
def set_Expire(self, value):
"""
Set the value of the Expire input for this Choreo. ((optional, integer) The amount of minutes past NOW() before the request will expire. Defaults to 1.)
"""
super(FunnelListInputSet, self)._set_input('Expire', value)
class FunnelListResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FunnelList Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Mixpanel.)
"""
return self._output.get('Response', None)
class FunnelListChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FunnelListResultSet(response, path)
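# Example usage (sketch): this assumes a valid TembooSession named `session`
# and the SDK's standard execute_with_results() helper; adjust to your setup.
#   choreo = FunnelList(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('YOUR_MIXPANEL_API_KEY')
#   inputs.set_APISecret('YOUR_MIXPANEL_API_SECRET')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())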
|
jordanemedlock/psychtruths
|
temboo/core/Library/Mixpanel/DataExport/Funnels/FunnelList.py
|
Python
|
apache-2.0
| 3,581 | 0.004468 |
from django.db.models import Count
from django.http import HttpResponse
from rest_framework.generics import ListAPIView
from rest_framework.views import APIView
from rest_framework.authentication import TokenAuthentication
from prometheus_client import generate_latest
from core import models
from core import serializers
from core import consts
tokenBearer = TokenAuthentication
tokenBearer.keyword = 'Bearer'
class TaskLogs(ListAPIView):
model = models.TaskLog
serializer_class = serializers.TaskLogSerializer
def get_queryset(self):
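        # Return only log entries newer than the client-supplied last_log_id so
        # that callers can poll for new logs incrementally.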
last_log_id = self.request.GET.get('last_log_id', 0)
return self.model.objects.filter(task_id=self.kwargs['task_id'], id__gt=last_log_id)
task_logs = TaskLogs.as_view()
class DjangoMetrics(APIView):
authentication_classes = (tokenBearer,)
def get(self, request):
result = generate_latest().decode()
return HttpResponse(result, content_type='text/plain; charset=utf-8')
class AnsibleManagerMetrics(APIView):
authentication_classes = (tokenBearer,)
def get(self, request):
result = '# HELP ansible_manager_template_last_task_success show success or fail last task\n'
result += '# TYPE ansible_manager_template_last_task_success gauge\n'
for template in models.TaskTemplate.objects.exclude(cron=''):
completed_tasks = template.tasks.filter(status__in=consts.NOT_RUN_STATUSES)
if not completed_tasks:
continue
success = int(completed_tasks.last().status == consts.COMPLETED)
result += 'ansible_manager_template_last_task_success{id="%s", name="%s"} %s\n' % (
template.pk, template.name, success)
result += '# HELP ansible_manager_tasks_completed_total show number of completed tasks\n'
result += '# TYPE ansible_manager_tasks_completed_total gauge\n'
tasks = models.Task.objects.values_list('template__id', 'template__name', 'status').annotate(count=Count('id'))
for template_id, template_name, status, count in tasks:
result += 'ansible_manager_tasks_completed_total{id="%s", name="%s", status="%s"} %s\n' % (
template_id, template_name, status, count
)
return HttpResponse(result, content_type='text/plain; charset=utf-8')
|
telminov/ansible-manager
|
core/views/rest.py
|
Python
|
mit
| 2,316 | 0.003454 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
qualities,
str_or_none,
)
class ReverbNationIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$'
_TESTS = [{
'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
'md5': 'c0aaf339bcee189495fdf5a8c8ba8645',
'info_dict': {
'id': '16965047',
'ext': 'mp3',
'title': 'MONA LISA',
'uploader': 'ALKILADOS',
'uploader_id': '216429',
'thumbnail': r're:^https?://.*\.jpg',
},
}]
def _real_extract(self, url):
song_id = self._match_id(url)
api_res = self._download_json(
'https://api.reverbnation.com/song/%s' % song_id,
song_id,
note='Downloading information of song %s' % song_id
)
THUMBNAILS = ('thumbnail', 'image')
quality = qualities(THUMBNAILS)
thumbnails = []
for thumb_key in THUMBNAILS:
if api_res.get(thumb_key):
thumbnails.append({
'url': api_res[thumb_key],
'preference': quality(thumb_key)
})
return {
'id': song_id,
'title': api_res['name'],
'url': api_res['url'],
'uploader': api_res.get('artist', {}).get('name'),
'uploader_id': str_or_none(api_res.get('artist', {}).get('id')),
'thumbnails': thumbnails,
'ext': 'mp3',
'vcodec': 'none',
}
|
fluxw42/youtube-dl
|
youtube_dl/extractor/reverbnation.py
|
Python
|
unlicense
| 1,627 | 0.000615 |
#!C:\Users\SeanSaito\Dev\aviato\flask\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'kartograph.py==0.6.8','console_scripts','kartograph'
__requires__ = 'kartograph.py==0.6.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('kartograph.py==0.6.8', 'console_scripts', 'kartograph')()
)
|
hrishioa/Aviato
|
flask/Scripts/kartograph-script.py
|
Python
|
gpl-2.0
| 364 | 0.005495 |
#!/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
class LoginRequiredMixin(object):
""" This works: class InterviewListView(LoginRequiredMixin, ListView)
This DOES NOT work: class InterviewListView(ListView, LoginRequiredMixin)
    Wrapping as_view() through a mixin like this may not be the cleanest approach, but it works for enforcing login.
"""
@classmethod
def as_view(cls, **initkwargs):
# Ignore PyCharm warning below, this is a Mixin class after all
view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
return login_required(view)
|
ecaldwe1/zika
|
website/mixins.py
|
Python
|
mpl-2.0
| 1,140 | 0.000877 |
import subprocess
from random import choice, randint
from time import sleep
import yaml
from ooni.otime import timestamp
import const
class Test(object):
def __init__(self, testfile, args=[]):
self.testfile = testfile
self.args = args
self.output = None
self.status = None
self.errorMessage = None
self.parser = None
self.report = None
self.reportName = None
def run(self):
self.reportName = "report-%s-%s.yamloo" % (self.testfile, timestamp())
self.output = runTest(self)
self.parseResults()
def parseResults(self):
self.parser = TestParser(self)
self.parser.parseReport()
def printResults(self):
self.parser.printResults()
def getResults(self):
return {
"Status": self.status,
"ErrorMessage": self.errorMessage,
}
class SiteProbe(Test):
def __init__(self, testfile=const.PROBE_TEST, target=const.TOR_SITE_URL):
super(SiteProbe, self).__init__(testfile=testfile, args=["-u", target])
self.target = target
class TCPTest(Test):
def __init__(self, testfile=const.TCP_TEST,
target=const.TOR_DOMAIN, port="443"):
super(TCPTest, self).__init__(
testfile=testfile, args=["-t", target, "-p", port])
self.target = target
class PingTest(Test):
def __init__(self, testfile=const.PING_TEST, target=None):
args = ["-t", target] if target is not None else []
super(PingTest, self).__init__(testfile=testfile, args=args)
self.target = target
self.packets = None
def parsePackets(self, report):
try:
return 'echo-reply' in report['ReceivedPackets'][0][0]['summary']
except:
return False
def parseResults(self):
self.parser = TestParser(self)
self.parser.loadReport()
if self.report['TestStatus'] == 'OK':
self.packets = self.report['packets']
if self.parsePackets(self.report):
self.status = "OK"
return
self.status = "FAILED"
self.errorMessage = "Host unreachable"
raise TestException(self)
class DNSTest(Test):
def __init__(self, testfile=const.DNS_TEST, target=const.TOR_DOMAIN):
super(DNSTest, self).__init__(testfile=testfile, args=["-t", target])
self.target = target
class Traceroute(Test):
def __init__(self, testfile=const.TRACEROUTE_TEST, target=None):
args = ["-b", target] if target is not None else []
super(Traceroute, self).__init__(testfile=testfile, args=args)
self.target = target
class TestParser(object):
def __init__(self, test):
self.test = test
def loadReport(self):
with open(self.test.reportName, 'r') as f:
entries = yaml.safe_load_all(f)
headers = entries.next()
self.test.report = entries.next()
def parseReport(self):
self.loadReport()
self.test.status = self.test.report['TestStatus']
if not self.test.status == "OK":
self.test.errorMessage = self.test.report['TestException']
raise TestException(self.test)
def printResults(self):
print "Test: %s" % self.test.testfile
if hasattr(self.test, "target") and self.test.target is not None:
print "Target: %s" % self.test.target
results = self.test.getResults()
for key, value in results.iteritems():
if key and value:
print "%s: %s" % (key, value)
class TestCase(list):
def __init__(self, tests=[], sleep_interval=const.SLEEP_INTERVAL):
super(TestCase, self).__init__(tests)
self.sleepInterval = sleep_interval
def run(self):
tests = testCaseGenerator(list(self))
for test in tests:
try:
test.run()
except TestException, e:
print e
sleep(randint(self.sleepInterval[0], self.sleepInterval[1]))
def printResults(self):
for test in self:
test.printResults()
print
def getFailed(self):
return [test for test in self if test.status != "OK"]
def testCaseGenerator(seq):
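    # Yields the given tests one at a time in random order, removing each test
    # from the sequence as it is chosen.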
for x in range(len(seq)):
test = choice(seq)
seq.remove(test)
yield test
def runTest(test):
binary = const.OONI_BINARY
args = [binary, "-o", test.reportName, "-n", test.testfile]
if test.args:
args += test.args
print "Running test %s" % test.testfile
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
return output
class TestException(Exception):
def __init__(self, test):
self.testInstance = test
def __str__(self):
return "%s: %s (%s)" % (self.testInstance.testfile,
self.testInstance.status,
self.testInstance.errorMessage)
|
edagar/censorship-analyser
|
test.py
|
Python
|
bsd-3-clause
| 5,035 | 0.000199 |
import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="tickcolor", parent_name="parcats.line.colorbar", **kwargs
):
super(TickcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/parcats/line/colorbar/_tickcolor.py
|
Python
|
mit
| 480 | 0.002083 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function.
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading.
- Required when creating a function. Uses parameters as described in boto3 docs.
- Required when C(state=present).
- For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
- Required when C(state=present).
handler:
description:
- The function within your code that Lambda calls to begin execution.
zip_file:
description:
- A .zip file containing your deployment package
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored.
- If C(state=present) then either zip_file or s3_bucket must be present.
- C(s3_bucket) and C(s3_key) are required together.
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload.
- C(s3_bucket) and C(s3_key) are required together.
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given.
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
- tag dict to apply to the function (requires botocore 1.5.40 or above).
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import traceback
import re
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account information (account id and partition) we are currently working on
    get_account_info tries to find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
partition = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
caller_id = sts_client.get_caller_identity()
account_id = caller_id.get('Account')
partition = caller_id.get('Arn').split(':')[1]
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
arn, partition, service, reg, account_id, resource = iam_client.get_user()['User']['Arn'].split(':')
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e.message)
                m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
account_id = m.group(4)
partition = m.group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
if partition is None:
module.fail_json_aws(e, msg="getting account information: partition")
except Exception as e:
module.fail_json_aws(e, msg="getting account information")
return account_id, partition
def get_current_function(connection, function_name, qualifier=None):
try:
if qualifier is not None:
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
return connection.get_function(FunctionName=function_name)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return None
except (KeyError, AttributeError):
pass
raise e
def sha256sum(filename):
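    # Compute the base64-encoded SHA-256 digest of the file contents; Lambda
    # reports code hashes in the same format (CodeSha256), so the two values
    # can be compared directly further down.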
hasher = hashlib.sha256()
with open(filename, 'rb') as f:
hasher.update(f.read())
code_hash = hasher.digest()
code_b64 = base64.b64encode(code_hash)
hex_digest = code_b64.decode('utf-8')
return hex_digest
def set_tag(client, module, tags, function):
if not hasattr(client, "list_tags"):
module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
changed = False
arn = function['Configuration']['FunctionArn']
try:
current_tags = client.list_tags(Resource=arn).get('Tags', {})
except ClientError as e:
module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
try:
if tags_to_remove:
client.untag_resource(
Resource=arn,
TagKeys=tags_to_remove
)
changed = True
if tags_to_add:
client.tag_resource(
Resource=arn,
Tags=tags_to_add
)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc())
return changed
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
runtime=dict(),
role=dict(),
handler=dict(),
zip_file=dict(aliases=['src']),
s3_bucket=dict(),
s3_key=dict(),
s3_object_version=dict(),
description=dict(default=''),
timeout=dict(type='int', default=3),
memory_size=dict(type='int', default=128),
vpc_subnet_ids=dict(type='list'),
vpc_security_group_ids=dict(type='list'),
environment_variables=dict(type='dict'),
dead_letter_arn=dict(),
tags=dict(type='dict'),
)
mutually_exclusive = [['zip_file', 's3_key'],
['zip_file', 's3_bucket'],
['zip_file', 's3_object_version']]
required_together = [['s3_key', 's3_bucket'],
['vpc_subnet_ids', 'vpc_security_group_ids']]
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_if=required_if)
name = module.params.get('name')
state = module.params.get('state').lower()
runtime = module.params.get('runtime')
role = module.params.get('role')
handler = module.params.get('handler')
s3_bucket = module.params.get('s3_bucket')
s3_key = module.params.get('s3_key')
s3_object_version = module.params.get('s3_object_version')
zip_file = module.params.get('zip_file')
description = module.params.get('description')
timeout = module.params.get('timeout')
memory_size = module.params.get('memory_size')
vpc_subnet_ids = module.params.get('vpc_subnet_ids')
vpc_security_group_ids = module.params.get('vpc_security_group_ids')
environment_variables = module.params.get('environment_variables')
dead_letter_arn = module.params.get('dead_letter_arn')
tags = module.params.get('tags')
check_mode = module.check_mode
changed = False
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (ClientError, ValidationError) as e:
module.fail_json_aws(e, msg="Trying to connect to AWS")
if state == 'present':
if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
role_arn = role
else:
# get account ID and assemble ARN
account_id, partition = get_account_info(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role)
    # Get function configuration if present, None otherwise
current_function = get_current_function(client, name)
# Update existing Lambda function
if state == 'present' and current_function:
# Get current state
current_config = current_function['Configuration']
current_version = None
# Update function configuration
func_kwargs = {'FunctionName': name}
# Update configuration if needed
if role_arn and current_config['Role'] != role_arn:
func_kwargs.update({'Role': role_arn})
if handler and current_config['Handler'] != handler:
func_kwargs.update({'Handler': handler})
if description and current_config['Description'] != description:
func_kwargs.update({'Description': description})
if timeout and current_config['Timeout'] != timeout:
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
if (environment_variables is not None) and (current_config.get(
'Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
else:
if dead_letter_arn != "":
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# Check for unsupported mutation
if current_config['Runtime'] != runtime:
module.fail_json(msg='Cannot change runtime. Please recreate the function')
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
if 'VpcConfig' in current_config:
# Compare VPC config with current config
current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}
func_kwargs.update({'VpcConfig': new_vpc_config})
else:
# No VPC configuration is desired, assure VPC config is empty when present in current config
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
# Upload new configuration if configuration has changed
if len(func_kwargs) > 1:
try:
if not check_mode:
response = client.update_function_configuration(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to update lambda configuration")
# Update code configuration
code_kwargs = {'FunctionName': name, 'Publish': True}
# Update S3 location
if s3_bucket and s3_key:
# If function is stored on S3 always update
code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
# If S3 Object Version is given
if s3_object_version:
code_kwargs.update({'S3ObjectVersion': s3_object_version})
# Compare local checksum, update remote code when different
elif zip_file:
local_checksum = sha256sum(zip_file)
remote_checksum = current_config['CodeSha256']
# Only upload new code when local code is different compared to the remote code
if local_checksum != remote_checksum:
try:
with open(zip_file, 'rb') as f:
encoded_zip = f.read()
code_kwargs.update({'ZipFile': encoded_zip})
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
# Tag Function
if tags is not None:
if set_tag(client, module, tags, current_function):
changed = True
# Upload new code if needed (e.g. code checksum has changed)
if len(code_kwargs) > 2:
try:
if not check_mode:
response = client.update_function_code(**code_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to upload new code")
# Describe function code and configuration
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after updating')
# We're done
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Function doesn't exists, create new Lambda function
elif state == 'present':
if s3_bucket and s3_key:
# If function is stored on S3
code = {'S3Bucket': s3_bucket,
'S3Key': s3_key}
if s3_object_version:
code.update({'S3ObjectVersion': s3_object_version})
elif zip_file:
# If function is stored in local zipfile
try:
with open(zip_file, 'rb') as f:
zip_content = f.read()
code = {'ZipFile': zip_content}
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
else:
module.fail_json(msg='Either S3 object or path to zipfile required')
func_kwargs = {'FunctionName': name,
'Publish': True,
'Runtime': runtime,
'Role': role_arn,
'Code': code,
'Timeout': timeout,
'MemorySize': memory_size,
}
if description is not None:
func_kwargs.update({'Description': description})
if handler is not None:
func_kwargs.update({'Handler': handler})
if environment_variables:
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# If VPC configuration is given
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}})
# Finally try to create function
current_version = None
try:
if not check_mode:
response = client.create_function(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to create function")
# Tag Function
if tags is not None:
if set_tag(client, module, tags, get_current_function(client, name)):
changed = True
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after creating')
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Delete existing Lambda function
if state == 'absent' and current_function:
try:
if not check_mode:
client.delete_function(FunctionName=name)
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to delete Lambda function")
module.exit_json(changed=changed)
# Function already absent, do nothing
elif state == 'absent':
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
thaim/ansible
|
lib/ansible/modules/cloud/amazon/lambda.py
|
Python
|
mit
| 23,103 | 0.002813 |
# -*- coding: utf-8 -*-
'''
Django settings for foodcheck project
'''
# Copyright (C) 2013 Timothy James Austen, Eileen Qiuhua Lin,
# Richard Esplin <richard-oss@esplins.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import imp, os
# a setting to determine whether we are running on OpenShift
ON_OPENSHIFT = False
if 'OPENSHIFT_REPO_DIR' in os.environ:
ON_OPENSHIFT = True
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
# turn off debug when on production
if (os.environ.get('OPENSHIFT_NAMESPACE') == 'foodcheck' and
        os.environ.get('OPENSHIFT_APP_NAME') == 'live'):
    DEBUG = False
elif ON_OPENSHIFT:
    DEBUG = bool(os.environ.get('DEBUG', False))
    if DEBUG:
        print("WARNING: The DEBUG environment is set to True.")
else:
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
('Richard Esplin', 'richard-oss@esplins.org'),
('Timothy Austen', 'austentj@gmail.com'),
)
MANAGERS = ADMINS
if ON_OPENSHIFT:
    # os.environ['OPENSHIFT_POSTGRESQL_DB_*'] variables can be used with databases created
# with rhc cartridge add (see /README in this git repo)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['OPENSHIFT_APP_NAME'],
'USER': os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME'],
'PASSWORD': os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD'],
'HOST': os.environ['OPENSHIFT_POSTGRESQL_DB_HOST'],
'PORT': os.environ['OPENSHIFT_POSTGRESQL_DB_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
            'NAME': os.path.join(os.environ.get('OPENSHIFT_DATA_DIR', PROJECT_DIR), 'sqlite3.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.environ.get('OPENSHIFT_DATA_DIR', '')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# Listing the project dir here avoids having to collect static files in a
# subdirectory i.e. /static/css instead of /static/foodcheck_proj/css
# example: os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make a dictionary of default keys
default_keys = { 'SECRET_KEY': 'vm4rl5*ymb@2&d_(gc$gb-^twq9w(u69hi--%$5xrh!xk(t%hw' }
# Replace default keys with dynamic values if we are in OpenShift
use_keys = default_keys
if ON_OPENSHIFT:
imp.find_module('openshiftlibs')
import openshiftlibs
use_keys = openshiftlibs.openshift_secure(default_keys)
# Make this unique, and don't share it with anybody.
SECRET_KEY = use_keys['SECRET_KEY']
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'foodcheck_proj.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'leaflet',
'foodcheck_app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
)
# Leaflet settings
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (37.7750, -122.4183),
'DEFAULT_ZOOM': 10,
'PLUGINS': {
'main': {
'js': STATIC_URL + 'js/demo.js',
},
}
}
# vim:expandtab tabstop=8 shiftwidth=4 ts=8 sw=4 softtabstop=4
|
esplinr/foodcheck
|
wsgi/foodcheck_proj/settings.py
|
Python
|
agpl-3.0
| 8,249 | 0.003758 |
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" periodic table data, **obsolete**
now that the C++ code exposes an interface to the internal PT stuff,
this data is mostly obsolete
"""
# Num Symb RCov RBO RVdW Max Bnd Mass nval
periodicData=\
"""
0 X 0.0 0.0 0.0 0 0.000 0
1 H 0.230 0.330 1.200 1 1.008 1
2 He 0.930 0.700 1.400 0 4.003 2
3 Li 0.680 1.230 1.820 1 6.941 1
4 Be 0.350 0.900 1.700 2 9.012 2
5 B 0.830 0.820 2.080 3 10.812 3
6 C 0.680 0.770 1.950 4 12.011 4
7 N 0.680 0.700 1.850 4 14.007 5
8 O 0.680 0.660 1.700 2 15.999 6
9 F 0.640 0.611 1.730 1 18.998 7
10 Ne 1.120 0.700 1.540 0 20.180 8
11 Na 0.970 1.540 2.270 1 22.990 1
12 Mg 1.100 1.360 1.730 2 24.305 2
13 Al 1.350 1.180 2.050 6 26.982 3
14 Si 1.200 0.937 2.100 6 28.086 4
15 P 0.750 0.890 2.080 5 30.974 5
16 S 1.020 1.040 2.000 6 32.067 6
17 Cl 0.990 0.997 1.970 1 35.453 7
18 Ar 1.570 1.740 1.880 0 39.948 8
19 K 1.330 2.030 2.750 1 39.098 1
20 Ca 0.990 1.740 1.973 2 40.078 2
21 Sc 1.440 1.440 1.700 6 44.956 3
22 Ti 1.470 1.320 1.700 6 47.867 4
23 V 1.330 1.220 1.700 6 50.942 5
24 Cr 1.350 1.180 1.700 6 51.996 6
25 Mn 1.350 1.170 1.700 8 54.938 7
26 Fe 1.340 1.170 1.700 6 55.845 8
27 Co 1.330 1.160 1.700 6 58.933 9
28 Ni 1.500 1.150 1.630 6 58.693 10
29 Cu 1.520 1.170 1.400 6 63.546 11
30 Zn 1.450 1.250 1.390 6 65.39 2
31 Ga 1.220 1.260 1.870 3 69.723 3
32 Ge 1.170 1.188 1.700 4 72.61 4
33 As 1.210 1.200 1.850 3 74.922 5
34 Se 1.220 1.170 1.900 2 78.96 6
35 Br 1.210 1.167 2.100 1 79.904 7
36 Kr 1.910 1.910 2.020 0 83.80 8
37 Rb 1.470 2.160 1.700 1 85.468 1
38 Sr 1.120 1.910 1.700 2 87.62 2
39 Y 1.780 1.620 1.700 6 88.906 3
40 Zr 1.560 1.450 1.700 6 91.224 4
41 Nb 1.480 1.340 1.700 6 92.906 5
42 Mo 1.470 1.300 1.700 6 95.94 6
43 Tc 1.350 1.270 1.700 6 98.0 7
44 Ru 1.400 1.250 1.700 6 101.07 8
45 Rh 1.450 1.250 1.700 6 102.906 9
46 Pd 1.500 1.280 1.630 6 106.42 10
47 Ag 1.590 1.340 1.720 6 107.868 11
48 Cd 1.690 1.480 1.580 6 112.412 2
49 In 1.630 1.440 1.930 3 114.818 3
50 Sn 1.460 1.385 2.170 4 118.711 4
51 Sb 1.460 1.400 2.200 3 121.760 5
52 Te 1.470 1.378 2.060 2 127.60 6
53 I 1.400 1.387 2.150 1 126.904 7
54 Xe 1.980 1.980 2.160 0 131.29 8
55 Cs 1.670 2.350 1.700 1 132.905 1
56 Ba 1.340 1.980 1.700 2 137.328 2
57 La 1.870 1.690 1.700 12 138.906 3
58 Ce 1.830 1.830 1.700 6 140.116 4
59 Pr 1.820 1.820 1.700 6 140.908 3
60 Nd 1.810 1.810 1.700 6 144.24 4
61 Pm 1.800 1.800 1.700 6 145.0 5
62 Sm 1.800 1.800 1.700 6 150.36 6
63 Eu 1.990 1.990 1.700 6 151.964 7
64 Gd 1.790 1.790 1.700 6 157.25 8
65 Tb 1.760 1.760 1.700 6 158.925 9
66 Dy 1.750 1.750 1.700 6 162.50 10
67 Ho 1.740 1.740 1.700 6 164.930 11
68 Er 1.730 1.730 1.700 6 167.26 12
69 Tm 1.720 1.720 1.700 6 168.934 13
70 Yb 1.940 1.940 1.700 6 173.04 14
71 Lu 1.720 1.720 1.700 6 174.967 15
72 Hf 1.570 1.440 1.700 6 178.49 4
73 Ta 1.430 1.340 1.700 6 180.948 5
74 W 1.370 1.300 1.700 6 183.84 6
75 Re 1.350 1.280 1.700 6 186.207 7
76 Os 1.370 1.260 1.700 6 190.23 8
77 Ir 1.320 1.270 1.700 6 192.217 9
78 Pt 1.500 1.300 1.720 6 195.078 10
79 Au 1.500 1.340 1.660 6 196.967 11
80 Hg 1.700 1.490 1.550 6 200.59 2
81 Tl 1.550 1.480 1.960 3 204.383 3
82 Pb 1.540 1.480 2.020 4 207.2 4
83 Bi 1.540 1.450 1.700 3 208.980 5
84 Po 1.680 1.460 1.700 2 209.0 6
85 At 1.700 1.450 1.700 1 210.0 7
86 Rn 2.400 2.400 1.700 0 222.0 8
87 Fr 2.000 2.000 1.700 1 223.0 1
88 Ra 1.900 1.900 1.700 2 226.0 2
89 Ac 1.880 1.880 1.700 6 227.0 3
90 Th 1.790 1.790 1.700 6 232.038 4
91 Pa 1.610 1.610 1.700 6 231.036 3
92 U 1.580 1.580 1.860 6 238.029 4
93 Np 1.550 1.550 1.700 6 237.0 5
94 Pu 1.530 1.530 1.700 6 244.0 6
95 Am 1.510 1.070 1.700 6 243.0 7
96 Cm 1.500 0.000 1.700 6 247.0 8
97 Bk 1.500 0.000 1.700 6 247.0 9
98 Cf 1.500 0.000 1.700 6 251.0 10
99 Es 1.500 0.000 1.700 6 252.0 11
100 Fm 1.500 0.000 1.700 6 257.0 12
101 Md 1.500 0.000 1.700 6 258.0 13
102 No 1.500 0.000 1.700 6 259.0 14
103 Lr 1.500 0.000 1.700 6 262.0 15
"""
nameTable = {}
numTable = {}
for line in periodicData.split('\n'):
splitLine = line.split()
if len(splitLine) > 1:
nameTable[splitLine[1]] = (int(splitLine[0]),float(splitLine[6]),int(splitLine[7]),\
int(splitLine[5]),float(splitLine[2]),float(splitLine[3]),
float(splitLine[4]))
numTable[int(splitLine[0])] = (splitLine[1],float(splitLine[6]),int(splitLine[7]),\
int(splitLine[5]),float(splitLine[2]),float(splitLine[3]),
float(splitLine[4]))
# a list of metals (transition metals, semi-metals, lanthanides and actinides)
metalRanges = ["13", "21-32", "39-51", "57-84", "89-103"]
metalNumList = []
for entry in metalRanges:
t = entry.split('-')
start = int(t[0])
if len(t) > 1:
end = int(t[1])
else:
end = start
if start > end:
start, end = end, start
metalNumList += range(start, end + 1)
metalNames = [numTable[x][0] for x in metalNumList]
# these are from table 4 of Rev. Comp. Chem. vol 2, 367-422, (1991)
# the order is [alpha(SP),alpha(SP2),alpha(SP3)]
# where values are not known, None has been inserted
hallKierAlphas = {
'H': [0.0, 0.0, 0.0], # removes explicit H's from consideration in the shape
'C': [-0.22, -0.13, 0.0],
'N': [-0.29, -0.20, -0.04],
'O': [None, -0.20, -0.04],
'F': [None, None, -0.07],
'P': [None, 0.30, 0.43],
'S': [None, 0.22, 0.35],
'Cl': [None, None, 0.29],
'Br': [None, None, 0.48],
'I': [None, None, 0.73]
}
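# Illustrative usage sketch (not part of the original module): the parsed
# tables above can be queried by symbol or by atomic number. Per the column
# comment above the data block, each entry unpacks as
# (atomic number, mass, n valence electrons, max bonds, R_cov, R_b&o, R_vdW)
# for nameTable, and the same with the symbol in place of the number for
# numTable.
if __name__ == '__main__':  # pragma: no cover
  num, mass, nval, maxBnd, rCov, rBO, rVdW = nameTable['C']
  print('C:', num, mass, nval, maxBnd, rCov, rBO, rVdW)
  print('element 8 is', numTable[8][0])              # -> 'O'
  print('sp3 alpha for N:', hallKierAlphas['N'][2])  # -> -0.04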
|
jandom/rdkit
|
rdkit/Chem/PeriodicTable.py
|
Python
|
bsd-3-clause
| 5,870 | 0.018739 |
# Copyright 2017 Speech Lab, EE Dept., IITM (Author: Srinivas Venkattaramanujam)
class Entry:
    """One alignment entry: a time span, its alignment status, and the
    range of word indices that the span covers."""
    def __init__(self, begin_time, end_time, status, word_begin, word_end):
        self.begin_time = float(begin_time)
        self.end_time = float(end_time)
        self.status = status
        self.word_begin = int(word_begin)
        self.word_end = int(word_end)
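# Minimal usage sketch (illustrative, not from the original file; the status
# value here is a made-up placeholder -- the alignment scripts define their
# own labels):
#
#   entry = Entry("0.00", "1.25", "aligned", 0, 3)
#   duration = entry.end_time - entry.begin_time   # -> 1.25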
|
dreaming-dog/kaldi-long-audio-alignment
|
scripts/classes/entry.py
|
Python
|
apache-2.0
| 326 | 0.04908 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestLogger(unittest2.TestCase):
PROJECT = 'test-project'
LOGGER_NAME = 'logger-name'
def _getTargetClass(self):
from gcloud.logging.logger import Logger
return Logger
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
conn = object()
client = _Client(self.PROJECT, conn)
logger = self._makeOne(self.LOGGER_NAME, client=client)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertTrue(logger.client is client)
self.assertEqual(logger.project, self.PROJECT)
self.assertEqual(logger.full_name, 'projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.path, '/projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.labels, None)
def test_ctor_explicit(self):
LABELS = {'foo': 'bar', 'baz': 'qux'}
conn = object()
client = _Client(self.PROJECT, conn)
logger = self._makeOne(self.LOGGER_NAME, client=client, labels=LABELS)
self.assertEqual(logger.name, self.LOGGER_NAME)
self.assertTrue(logger.client is client)
self.assertEqual(logger.project, self.PROJECT)
self.assertEqual(logger.full_name, 'projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.path, '/projects/%s/logs/%s'
% (self.PROJECT, self.LOGGER_NAME))
self.assertEqual(logger.labels, LABELS)
def test_batch_w_bound_client(self):
from gcloud.logging.logger import Batch
conn = object()
client = _Client(self.PROJECT, conn)
logger = self._makeOne(self.LOGGER_NAME, client=client)
batch = logger.batch()
self.assertTrue(isinstance(batch, Batch))
self.assertTrue(batch.logger is logger)
self.assertTrue(batch.client is client)
def test_batch_w_alternate_client(self):
from gcloud.logging.logger import Batch
conn1 = object()
conn2 = object()
client1 = _Client(self.PROJECT, conn1)
client2 = _Client(self.PROJECT, conn2)
logger = self._makeOne(self.LOGGER_NAME, client=client1)
batch = logger.batch(client2)
self.assertTrue(isinstance(batch, Batch))
self.assertTrue(batch.logger is logger)
self.assertTrue(batch.client is client2)
def test_log_text_w_str_implicit_client(self):
TEXT = 'TEXT'
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'textPayload': TEXT,
'resource': {
'type': 'global',
},
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client)
logger.log_text(TEXT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_text_w_default_labels(self):
TEXT = 'TEXT'
DEFAULT_LABELS = {'foo': 'spam'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'textPayload': TEXT,
'resource': {
'type': 'global',
},
'labels': DEFAULT_LABELS,
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client,
labels=DEFAULT_LABELS)
logger.log_text(TEXT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_text_w_unicode_explicit_client_labels_severity_httpreq(self):
TEXT = u'TEXT'
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'textPayload': TEXT,
'resource': {
'type': 'global',
},
'labels': LABELS,
'insertId': IID,
'severity': SEVERITY,
'httpRequest': REQUEST,
}]
client1 = _Client(self.PROJECT)
client2 = _Client(self.PROJECT)
api = client2.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client1,
labels=DEFAULT_LABELS)
logger.log_text(TEXT, client=client2, labels=LABELS,
insert_id=IID, severity=SEVERITY, http_request=REQUEST)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_struct_w_implicit_client(self):
STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'jsonPayload': STRUCT,
'resource': {
'type': 'global',
},
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client)
logger.log_struct(STRUCT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_struct_w_default_labels(self):
STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'}
DEFAULT_LABELS = {'foo': 'spam'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'jsonPayload': STRUCT,
'resource': {
'type': 'global',
},
'labels': DEFAULT_LABELS,
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client,
labels=DEFAULT_LABELS)
logger.log_struct(STRUCT)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_struct_w_explicit_client_labels_severity_httpreq(self):
STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'}
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'jsonPayload': STRUCT,
'resource': {
'type': 'global',
},
'labels': LABELS,
'insertId': IID,
'severity': SEVERITY,
'httpRequest': REQUEST,
}]
client1 = _Client(self.PROJECT)
client2 = _Client(self.PROJECT)
api = client2.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client1,
labels=DEFAULT_LABELS)
logger.log_struct(STRUCT, client=client2, labels=LABELS,
insert_id=IID, severity=SEVERITY,
http_request=REQUEST)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_proto_w_implicit_client(self):
import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value
message = Struct(fields={'foo': Value(bool_value=True)})
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'protoPayload': json.loads(MessageToJson(message)),
'resource': {
'type': 'global',
},
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client)
logger.log_proto(message)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_proto_w_default_labels(self):
import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value
message = Struct(fields={'foo': Value(bool_value=True)})
DEFAULT_LABELS = {'foo': 'spam'}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'protoPayload': json.loads(MessageToJson(message)),
'resource': {
'type': 'global',
},
'labels': DEFAULT_LABELS,
}]
client = _Client(self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client,
labels=DEFAULT_LABELS)
logger.log_proto(message)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_log_proto_w_explicit_client_labels_severity_httpreq(self):
import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value
message = Struct(fields={'foo': Value(bool_value=True)})
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
ENTRIES = [{
'logName': 'projects/%s/logs/%s' % (
self.PROJECT, self.LOGGER_NAME),
'protoPayload': json.loads(MessageToJson(message)),
'resource': {
'type': 'global',
},
'labels': LABELS,
'insertId': IID,
'severity': SEVERITY,
'httpRequest': REQUEST,
}]
client1 = _Client(self.PROJECT)
client2 = _Client(self.PROJECT)
api = client2.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client1,
labels=DEFAULT_LABELS)
logger.log_proto(message, client=client2, labels=LABELS,
insert_id=IID, severity=SEVERITY,
http_request=REQUEST)
self.assertEqual(api._write_entries_called_with,
(ENTRIES, None, None, None))
def test_delete_w_bound_client(self):
client = _Client(project=self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client)
logger.delete()
self.assertEqual(api._logger_delete_called_with,
(self.PROJECT, self.LOGGER_NAME))
def test_delete_w_alternate_client(self):
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.logging_api = _DummyLoggingAPI()
logger = self._makeOne(self.LOGGER_NAME, client=client1)
logger.delete(client=client2)
self.assertEqual(api._logger_delete_called_with,
(self.PROJECT, self.LOGGER_NAME))
def test_list_entries_defaults(self):
LISTED = {
'projects': None,
'filter_': 'logName=projects/%s/logs/%s' %
(self.PROJECT, self.LOGGER_NAME),
'order_by': None,
'page_size': None,
'page_token': None,
}
TOKEN = 'TOKEN'
client = _Client(self.PROJECT)
client._token = TOKEN
logger = self._makeOne(self.LOGGER_NAME, client=client)
entries, token = logger.list_entries()
self.assertEqual(len(entries), 0)
self.assertEqual(token, TOKEN)
self.assertEqual(client._listed, LISTED)
def test_list_entries_explicit(self):
from gcloud.logging import DESCENDING
PROJECT1 = 'PROJECT1'
PROJECT2 = 'PROJECT2'
FILTER = 'resource.type:global'
TOKEN = 'TOKEN'
PAGE_SIZE = 42
LISTED = {
'projects': ['PROJECT1', 'PROJECT2'],
'filter_': '%s AND logName=projects/%s/logs/%s' %
(FILTER, self.PROJECT, self.LOGGER_NAME),
'order_by': DESCENDING,
'page_size': PAGE_SIZE,
'page_token': TOKEN,
}
client = _Client(self.PROJECT)
logger = self._makeOne(self.LOGGER_NAME, client=client)
entries, token = logger.list_entries(
projects=[PROJECT1, PROJECT2], filter_=FILTER, order_by=DESCENDING,
page_size=PAGE_SIZE, page_token=TOKEN)
self.assertEqual(len(entries), 0)
self.assertEqual(token, None)
self.assertEqual(client._listed, LISTED)
class TestBatch(unittest2.TestCase):
PROJECT = 'test-project'
def _getTargetClass(self):
from gcloud.logging.logger import Batch
return Batch
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
logger = _Logger()
client = _Client(project=self.PROJECT)
batch = self._makeOne(logger, client)
self.assertTrue(batch.logger is logger)
self.assertTrue(batch.client is client)
self.assertEqual(len(batch.entries), 0)
def test_log_text_defaults(self):
TEXT = 'This is the entry text'
client = _Client(project=self.PROJECT, connection=object())
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_text(TEXT)
self.assertEqual(batch.entries,
[('text', TEXT, None, None, None, None)])
def test_log_text_explicit(self):
TEXT = 'This is the entry text'
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
client = _Client(project=self.PROJECT, connection=object())
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_text(TEXT, labels=LABELS, insert_id=IID, severity=SEVERITY,
http_request=REQUEST)
self.assertEqual(batch.entries,
[('text', TEXT, LABELS, IID, SEVERITY, REQUEST)])
def test_log_struct_defaults(self):
STRUCT = {'message': 'Message text', 'weather': 'partly cloudy'}
client = _Client(project=self.PROJECT, connection=object())
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_struct(STRUCT)
self.assertEqual(batch.entries,
[('struct', STRUCT, None, None, None, None)])
def test_log_struct_explicit(self):
STRUCT = {'message': 'Message text', 'weather': 'partly cloudy'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
client = _Client(project=self.PROJECT, connection=object())
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_struct(STRUCT, labels=LABELS, insert_id=IID,
severity=SEVERITY, http_request=REQUEST)
self.assertEqual(batch.entries,
[('struct', STRUCT, LABELS, IID, SEVERITY, REQUEST)])
def test_log_proto_defaults(self):
from google.protobuf.struct_pb2 import Struct, Value
message = Struct(fields={'foo': Value(bool_value=True)})
client = _Client(project=self.PROJECT, connection=object())
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_proto(message)
self.assertEqual(batch.entries,
[('proto', message, None, None, None, None)])
def test_log_proto_explicit(self):
from google.protobuf.struct_pb2 import Struct, Value
message = Struct(fields={'foo': Value(bool_value=True)})
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
client = _Client(project=self.PROJECT, connection=object())
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_proto(message, labels=LABELS, insert_id=IID,
severity=SEVERITY, http_request=REQUEST)
self.assertEqual(batch.entries,
[('proto', message, LABELS, IID, SEVERITY, REQUEST)])
def test_commit_w_invalid_entry_type(self):
logger = _Logger()
client = _Client(project=self.PROJECT, connection=object())
batch = self._makeOne(logger, client)
batch.entries.append(('bogus', 'BOGUS', None, None, None, None))
with self.assertRaises(ValueError):
batch.commit()
def test_commit_w_bound_client(self):
import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value
TEXT = 'This is the entry text'
STRUCT = {'message': TEXT, 'weather': 'partly cloudy'}
message = Struct(fields={'foo': Value(bool_value=True)})
IID1 = 'IID1'
IID2 = 'IID2'
IID3 = 'IID3'
RESOURCE = {
'type': 'global',
}
ENTRIES = [
{'textPayload': TEXT, 'insertId': IID1},
{'jsonPayload': STRUCT, 'insertId': IID2},
{'protoPayload': json.loads(MessageToJson(message)),
'insertId': IID3},
]
client = _Client(project=self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = _Logger()
batch = self._makeOne(logger, client=client)
batch.log_text(TEXT, insert_id=IID1)
batch.log_struct(STRUCT, insert_id=IID2)
batch.log_proto(message, insert_id=IID3)
batch.commit()
self.assertEqual(list(batch.entries), [])
self.assertEqual(api._write_entries_called_with,
(ENTRIES, logger.full_name, RESOURCE, None))
def test_commit_w_alternate_client(self):
import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value
from gcloud.logging.logger import Logger
TEXT = 'This is the entry text'
STRUCT = {'message': TEXT, 'weather': 'partly cloudy'}
message = Struct(fields={'foo': Value(bool_value=True)})
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {
'foo': 'bar',
'baz': 'qux',
}
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
client1 = _Client(project=self.PROJECT)
client2 = _Client(project=self.PROJECT)
api = client2.logging_api = _DummyLoggingAPI()
logger = Logger('logger_name', client1, labels=DEFAULT_LABELS)
RESOURCE = {'type': 'global'}
ENTRIES = [
{'textPayload': TEXT, 'labels': LABELS},
{'jsonPayload': STRUCT, 'severity': SEVERITY},
{'protoPayload': json.loads(MessageToJson(message)),
'httpRequest': REQUEST},
]
batch = self._makeOne(logger, client=client1)
batch.log_text(TEXT, labels=LABELS)
batch.log_struct(STRUCT, severity=SEVERITY)
batch.log_proto(message, http_request=REQUEST)
batch.commit(client=client2)
self.assertEqual(list(batch.entries), [])
self.assertEqual(api._write_entries_called_with,
(ENTRIES, logger.full_name, RESOURCE, DEFAULT_LABELS))
def test_context_mgr_success(self):
import json
from google.protobuf.json_format import MessageToJson
from google.protobuf.struct_pb2 import Struct, Value
from gcloud.logging.logger import Logger
TEXT = 'This is the entry text'
STRUCT = {'message': TEXT, 'weather': 'partly cloudy'}
message = Struct(fields={'foo': Value(bool_value=True)})
DEFAULT_LABELS = {'foo': 'spam'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
client = _Client(project=self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = Logger('logger_name', client, labels=DEFAULT_LABELS)
RESOURCE = {
'type': 'global',
}
ENTRIES = [
{'textPayload': TEXT, 'httpRequest': REQUEST},
{'jsonPayload': STRUCT, 'labels': LABELS},
{'protoPayload': json.loads(MessageToJson(message)),
'severity': SEVERITY},
]
batch = self._makeOne(logger, client=client)
with batch as other:
other.log_text(TEXT, http_request=REQUEST)
other.log_struct(STRUCT, labels=LABELS)
other.log_proto(message, severity=SEVERITY)
self.assertEqual(list(batch.entries), [])
self.assertEqual(api._write_entries_called_with,
(ENTRIES, logger.full_name, RESOURCE, DEFAULT_LABELS))
def test_context_mgr_failure(self):
from google.protobuf.struct_pb2 import Struct, Value
TEXT = 'This is the entry text'
STRUCT = {'message': TEXT, 'weather': 'partly cloudy'}
LABELS = {'foo': 'bar', 'baz': 'qux'}
IID = 'IID'
SEVERITY = 'CRITICAL'
METHOD = 'POST'
URI = 'https://api.example.com/endpoint'
STATUS = '500'
REQUEST = {
'requestMethod': METHOD,
'requestUrl': URI,
'status': STATUS,
}
message = Struct(fields={'foo': Value(bool_value=True)})
client = _Client(project=self.PROJECT)
api = client.logging_api = _DummyLoggingAPI()
logger = _Logger()
UNSENT = [
('text', TEXT, None, IID, None, None),
('struct', STRUCT, None, None, SEVERITY, None),
('proto', message, LABELS, None, None, REQUEST),
]
batch = self._makeOne(logger, client=client)
try:
with batch as other:
other.log_text(TEXT, insert_id=IID)
other.log_struct(STRUCT, severity=SEVERITY)
other.log_proto(message, labels=LABELS, http_request=REQUEST)
raise _Bugout()
except _Bugout:
pass
self.assertEqual(list(batch.entries), UNSENT)
self.assertEqual(api._write_entries_called_with, None)
class _Logger(object):
labels = None
def __init__(self, name="NAME", project="PROJECT"):
self.full_name = 'projects/%s/logs/%s' % (project, name)
class _DummyLoggingAPI(object):
_write_entries_called_with = None
def write_entries(self, entries, logger_name=None, resource=None,
labels=None):
self._write_entries_called_with = (
entries, logger_name, resource, labels)
def logger_delete(self, project, logger_name):
self._logger_delete_called_with = (project, logger_name)
class _Client(object):
_listed = _token = None
_entries = ()
def __init__(self, project, connection=None):
self.project = project
self.connection = connection
def list_entries(self, **kw):
self._listed = kw
return self._entries, self._token
class _Bugout(Exception):
pass
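# Not part of the original module: a minimal way to run this test module
# directly, assuming the unittest2 backport imported above is installed.
# The gcloud-python project normally drives these tests through its own
# runner (tox), so treat this as an illustrative convenience only.
if __name__ == '__main__':
    unittest2.main()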
|
waprin/gcloud-python
|
gcloud/logging/test_logger.py
|
Python
|
apache-2.0
| 25,904 | 0 |
# Generated by Django 2.2.4 on 2019-08-07 19:56
import awx.main.utils.polymorphic
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
from awx.main.migrations._rbac import (
rebuild_role_parentage, rebuild_role_hierarchy,
migrate_ujt_organization, migrate_ujt_organization_backward,
restore_inventory_admins, restore_inventory_admins_backward
)
def rebuild_jt_parents(apps, schema_editor):
rebuild_role_parentage(apps, schema_editor, models=('jobtemplate',))
class Migration(migrations.Migration):
dependencies = [
('main', '0108_v370_unifiedjob_dependencies_processed'),
]
operations = [
# backwards parents and ancestors caching
migrations.RunPython(migrations.RunPython.noop, rebuild_jt_parents),
# add new organization field for JT and all other unified jobs
migrations.AddField(
model_name='unifiedjob',
name='tmp_organization',
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this unified job.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobs', to='main.Organization'),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='tmp_organization',
field=models.ForeignKey(blank=True, help_text='The organization used to determine access to this template.', null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjobtemplates', to='main.Organization'),
),
# while new and old fields exist, copy the organization fields
migrations.RunPython(migrate_ujt_organization, migrate_ujt_organization_backward),
# with data saved, remove old fields
migrations.RemoveField(
model_name='project',
name='organization',
),
migrations.RemoveField(
model_name='workflowjobtemplate',
name='organization',
),
        # now, safely rename the new fields; with the old fields removed there are no name conflicts
migrations.RenameField(
model_name='unifiedjobtemplate',
old_name='tmp_organization',
new_name='organization',
),
migrations.RenameField(
model_name='unifiedjob',
old_name='tmp_organization',
new_name='organization',
),
# parentage of job template roles has genuinely changed at this point
migrations.AlterField(
model_name='jobtemplate',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.job_template_admin_role'], related_name='+', to='main.Role'),
),
migrations.AlterField(
model_name='jobtemplate',
name='execute_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role', 'organization.execute_role'], related_name='+', to='main.Role'),
),
migrations.AlterField(
model_name='jobtemplate',
name='read_role',
field=awx.main.fields.ImplicitRoleField(editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], related_name='+', to='main.Role'),
),
# Re-compute the role parents and ancestors caching
migrations.RunPython(rebuild_jt_parents, migrations.RunPython.noop),
# for all permissions that will be removed, make them explicit
migrations.RunPython(restore_inventory_admins, restore_inventory_admins_backward),
]
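# Illustrative sketch only -- this is NOT the implementation of the
# migrate_ujt_organization helper imported above (the real logic lives in
# awx/main/migrations/_rbac.py). It merely shows the general shape of a
# RunPython forward function that copies an old per-model organization
# foreign key into the temporary unified field added by this migration;
# the function and variable names here are made up.
def _example_copy_project_org(apps, schema_editor):
    Project = apps.get_model('main', 'Project')
    for project in Project.objects.iterator():
        project.tmp_organization_id = project.organization_id
        project.save(update_fields=['tmp_organization'])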
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/migrations/0109_v370_job_template_organization_field.py
|
Python
|
apache-2.0
| 3,857 | 0.002074 |
"""
Player
The Player represents the game "account" and each login has only one
Player object. A Player is what chats on default channels but has no
other in-game-world existence. Rather, the Player puppets Objects (such
as Characters) in order to actually participate in the game world.
Guest
Guest players are simple low-level accounts that are created/deleted
on the fly and allow users to test the game without the commitment
of a full registration. Guest accounts are deactivated by default; to
activate them, add the following line to your settings file:
GUEST_ENABLED = True
You will also need to modify the connection screen to reflect the
possibility to connect with a guest account. The setting file accepts
several more options for customizing the Guest account system.
"""
from evennia import DefaultPlayer, DefaultGuest
class Player(DefaultPlayer):
"""
This class describes the actual OOC player (i.e. the user connecting
to the MUD). It does NOT have visual appearance in the game world (that
is handled by the character which is connected to this). Comm channels
are attended/joined using this object.
It can be useful e.g. for storing configuration options for your game, but
should generally not hold any character-related info (that's best handled
on the character level).
Can be set using BASE_PLAYER_TYPECLASS.
* available properties
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
     db - attribute-handler: store/retrieve database attributes on this object: self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(text=None, **kwargs)
swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string, session=None)
search(ostring, global_search=False, attribute_name=None, use_nicks=False, location=None, ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
    * Hook methods (when re-implementing, remember methods need to have self as first arg)
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_cmdset_get(**kwargs)
at_first_login()
at_post_login(session=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
def at_look(self, target=None, session=None):
"""Disable the look command on players.
This is due to the fact that players are never in OOC
        mode. When logging in through the menu, the user has to
        select a character, which is then automatically puppeted.
Likewise, the @ooc and @ic commands are disabled.
"""
return ""
class Guest(DefaultGuest):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
pass
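# Illustrative sketch (not from the original file): a typical customization
# is to override one of the hook methods listed in the Player docstring,
# e.g. at_post_login, to greet the account once it connects. The hook name
# and msg() come from the DefaultPlayer API described above; the greeting
# text is made up.
#
#   class Player(DefaultPlayer):
#       def at_post_login(self, session=None):
#           super(Player, self).at_post_login(session=session)
#           self.msg("Welcome back!")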
|
vlegoff/mud
|
typeclasses/players.py
|
Python
|
bsd-3-clause
| 4,163 | 0.001681 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for converting the Code2Seq dataset to a PLUR dataset.
"""
import os
import tarfile
import apache_beam as beam
from plur.stage_1.plur_dataset import Configuration
from plur.stage_1.plur_dataset import PlurDataset
from plur.utils import constants
from plur.utils import util
from plur.utils.graph_to_output_example import GraphToOutputExample
from plur.utils.graph_to_output_example import GraphToOutputExampleNotValidError
import tqdm
class Code2SeqDataset(PlurDataset):
# pylint: disable=line-too-long
"""Converting data from code2seq dataset to a PLUR dataset.
The dataset is used in: Alon, Uri, et al. 'code2seq: Generating sequences from
structured representations of code.' arXiv preprint arXiv:1808.01400 (2018).
The task is to predict the function name given the function body.
The provided dataset by code2seq are the tokenized function name, and the AST
paths. Therefore we have to create our own graph representation of code2seq.
We try to mimic the code2seq model by constructing a graph similar to figure
  3 in the code2seq paper. An example of such a graph is shown in
https://drive.google.com/file/d/1-cH0FzYIMikgTkUpzVkEZDGjoiqBB9C1/view?usp=sharing.
In short, we build the AST path subtree and connects all AST paths with a
code2seq root node to make it a graph.
"""
_URLS_SMALL = {
'java-small-preprocessed.tar.gz': {
'url': 'https://s3.amazonaws.com/code2seq/datasets/java-small-preprocessed.tar.gz',
'sha1sum': '857c2495785f606ab99676c7bbae601ea2160f66',
}
}
_URLS_MED = {
'java-med-preprocessed.tar.gz': {
'url': 'https://s3.amazonaws.com/code2seq/datasets/java-med-preprocessed.tar.gz',
'sha1sum': '219e558ddf46678ef322ff75bf1982faa1b6204d',
}
}
_URLS_LARGE = {
'java-large-preprocessed.tar.gz': {
'url': 'https://s3.amazonaws.com/code2seq/datasets/java-large-preprocessed.tar.gz',
'sha1sum': 'ebc229ba1838a3c8f3a69ab507eb26fa5460152a',
}
}
# pylint: enable=line-too-long
_GIT_URL = {}
_DATASET_NAME = 'code2seq_dataset'
_DATASET_DESCRIPTION = """\
This dataset is used to train the code2seq model. The task is to predict the
  function name, given the AST paths sampled from the function AST. An AST path is
a path between two leaf nodes in the AST.
"""
def __init__(self,
stage_1_dir,
configuration: Configuration = Configuration(),
transformation_funcs=(),
filter_funcs=(),
user_defined_split_range=(),
num_shards=1000,
seed=0,
dataset_size='small',
deduplicate=False):
# dataset_size can only be 'small', 'med' or 'large'.
valid_dataset_size = {'small', 'med', 'large'}
if dataset_size not in valid_dataset_size:
raise ValueError('{} not in {}'.format(dataset_size,
str(valid_dataset_size)))
if dataset_size == 'small':
urls = self._URLS_SMALL
elif dataset_size == 'med':
urls = self._URLS_MED
else:
urls = self._URLS_LARGE
self.dataset_size = dataset_size
super().__init__(self._DATASET_NAME, urls, self._GIT_URL,
self._DATASET_DESCRIPTION, stage_1_dir,
transformation_funcs=transformation_funcs,
filter_funcs=filter_funcs,
user_defined_split_range=user_defined_split_range,
num_shards=num_shards, seed=seed,
configuration=configuration, deduplicate=deduplicate)
def download_dataset(self):
"""Download the dataset using requests and extract the tarfile."""
super().download_dataset_using_requests()
# Extract the tarfile depending on the dataset size.
if self.dataset_size == 'small':
self.code2seq_extracted_dir = os.path.join(
self.raw_data_dir, 'java-small')
tarfile_name = 'java-small-preprocessed.tar.gz'
elif self.dataset_size == 'med':
self.code2seq_extracted_dir = os.path.join(
self.raw_data_dir, 'java-med')
tarfile_name = 'java-med-preprocessed.tar.gz'
else:
self.code2seq_extracted_dir = os.path.join(
self.raw_data_dir, 'java-large')
tarfile_name = 'java-large-preprocessed.tar.gz'
tarfiles_to_extract = []
tarfiles_to_extract = util.check_need_to_extract(
tarfiles_to_extract, self.code2seq_extracted_dir,
tarfile_name)
for filename in tarfiles_to_extract:
dest = os.path.join(self.raw_data_dir, filename)
with tarfile.open(dest, 'r:gz') as tf:
for member in tqdm.tqdm(
tf.getmembers(),
unit='file',
desc='Extracting {}'.format(filename)):
tf.extract(member, self.raw_data_dir)
def get_all_raw_data_paths(self):
"""Get paths to all raw data."""
# Get the filenames depending on the dataset size.
if self.dataset_size == 'small':
train_file = os.path.join(
self.code2seq_extracted_dir, 'java-small.train.c2s')
validation_file = os.path.join(
self.code2seq_extracted_dir, 'java-small.val.c2s')
test_file = os.path.join(
self.code2seq_extracted_dir, 'java-small.test.c2s')
elif self.dataset_size == 'med':
train_file = os.path.join(
self.code2seq_extracted_dir, 'java-med.train.c2s')
validation_file = os.path.join(
self.code2seq_extracted_dir, 'java-med.val.c2s')
test_file = os.path.join(
self.code2seq_extracted_dir, 'java-med.test.c2s')
else:
train_file = os.path.join(
self.code2seq_extracted_dir, 'java-large.train.c2s')
validation_file = os.path.join(
self.code2seq_extracted_dir, 'java-large.val.c2s')
test_file = os.path.join(
self.code2seq_extracted_dir, 'java-large.test.c2s')
return [train_file, validation_file, test_file]
def raw_data_paths_to_raw_data_do_fn(self):
"""Returns a beam.DoFn subclass that reads the raw data."""
return C2SExtractor(super().get_random_split,
bool(self.user_defined_split_range))
def _construct_token_subtree(self, graph_to_output_example, token,
cur_node_id, token_root_name):
# pylint: disable=line-too-long
"""Construct the token subtree in a AST path.
We create a node for each subtoken in the token, all subtokens are connected
to the next subtoken via the 'NEXT_SUBTOKEN' edge. All subtokens are
connected to the token root node via the 'SUBTOKEN' edge. See the draw.io
figure mentioned in the class doc for the visualization.
Args:
graph_to_output_example: A GraphToOutputExample instance.
token: Starting or ending token in the AST path.
cur_node_id: Next available node id.
token_root_name: Node type and label for the token root node.
Returns:
A tuple of graph_to_output_example, cur_node_id, token_node_id.
graph_to_output_example is updated with the token subtree, cur_node_id is
the next available node id after all the token subtree nodes are added,
and token_node_id is the node id of the root token node.
"""
subtokens = token.split('|')
subtoken_node_ids = []
prev_subtoken_id = -1
    # Create a node for each subtoken.
for subtoken in subtokens:
graph_to_output_example.add_node(cur_node_id, 'SUBTOKEN', subtoken)
subtoken_node_ids.append(cur_node_id)
# Connects to the previous subtoken node
if prev_subtoken_id != -1:
graph_to_output_example.add_edge(prev_subtoken_id, cur_node_id,
'NEXT_SUBTOKEN')
prev_subtoken_id = cur_node_id
cur_node_id += 1
# Add a root node for the token subtree.
graph_to_output_example.add_node(cur_node_id, token_root_name,
token_root_name)
token_node_id = cur_node_id
cur_node_id += 1
# Connect all subtoken nodes to the token subtree root node.
for node_id in subtoken_node_ids:
graph_to_output_example.add_edge(token_node_id, node_id, 'SUBTOKEN')
return graph_to_output_example, cur_node_id, token_node_id
def _construct_ast_nodes_subtree(self, graph_to_output_example, ast_nodes,
cur_node_id):
"""Construct the AST nodes subtree in a AST path.
We create a node for each AST node in the AST path. Each AST node are
connected to the next AST node via the 'NEXT_AST_NODE' edge. See the draw.io
figure mentioned in the class doc for the visualization.
Args:
graph_to_output_example: A GraphToOutputExample instance.
ast_nodes: AST nodes in the AST path.
cur_node_id: Current available node id.
Returns:
A tuple of graph_to_output_example, cur_node_id, ast_node_ids.
graph_to_output_example is updated with the ast nodes subtree,
cur_node_id is the next available node id after all the ast nodes are
added, and ast_node_ids the node ids of all AST nodes.
"""
ast_nodes = ast_nodes.split('|')
ast_node_ids = []
prev_ast_node_id = -1
    # Create a node for each AST node.
for ast_node in ast_nodes:
graph_to_output_example.add_node(cur_node_id, 'AST_NODE', ast_node)
ast_node_ids.append(cur_node_id)
# Connects to the previous AST node.
if prev_ast_node_id != -1:
graph_to_output_example.add_edge(prev_ast_node_id, cur_node_id,
'NEXT_AST_NODE')
prev_ast_node_id = cur_node_id
cur_node_id += 1
return graph_to_output_example, cur_node_id, ast_node_ids
def raw_data_to_graph_to_output_example(self, raw_data):
# pylint: disable=line-too-long
"""Convert raw data to the unified GraphToOutputExample data structure.
The Code2Seq raw data contains the target function name, and the sampled
AST paths. Each AST path starts and ends with a token, and a series of
AST nodes that connects the two tokens. We use _construct_token_subtree
to build the token subtree and _construct_ast_nodes_subtree to build the
AST nodes subtree. Then, all AST paths' nodes are connected to a AST root
node.
All AST root nodes are connected to a single code2seq root node.
https://drive.google.com/file/d/1-cH0FzYIMikgTkUpzVkEZDGjoiqBB9C1/view?usp=sharing
shows an example of such a graph and the original AST path.
Args:
raw_data: A dictionary with 'split', 'target_label' and 'ast_paths' as keys.
The value of the 'split' field is the split (train/valid/test) that the
data belongs to. The value of the 'target_label' field is the function
name. The value of the 'ast_paths' field is a list of AST paths.
Raises:
GraphToOutputExampleNotValidError if the GraphToOutputExample is not
valid.
Returns:
A dictionary with keys 'split' and 'GraphToOutputExample'. Values are the
split(train/validation/test) the data belongs to, and the
GraphToOutputExample instance.
"""
# pylint: enable=line-too-long
split = raw_data['split']
target_label = raw_data['target_label']
ast_paths = raw_data['ast_paths']
graph_to_output_example = GraphToOutputExample()
cur_node_id = 0
ast_path_root_node_ids = []
# This is the root node of all AST path nodes.
graph_to_output_example.add_node(cur_node_id, 'C2C_ROOT', 'C2C_ROOT')
c2c_root_node_id = cur_node_id
cur_node_id += 1
for ast_path in ast_paths:
# The start_token subtree
start_token = ast_path[0]
graph_to_output_example, cur_node_id, start_token_node_id = (
self._construct_token_subtree(
graph_to_output_example, start_token, cur_node_id, 'START_TOKEN'))
# The ast_nodes subtree
ast_nodes = ast_path[1]
graph_to_output_example, cur_node_id, ast_node_ids = (
self._construct_ast_nodes_subtree(
graph_to_output_example, ast_nodes, cur_node_id))
# The end_token subtree
end_token = ast_path[2]
graph_to_output_example, cur_node_id, end_token_node_id = (
self._construct_token_subtree(
graph_to_output_example, end_token, cur_node_id, 'END_TOKEN'))
# Connects the start_token root node with the first node in the
# ast_nodes subtree.
graph_to_output_example.add_edge(
start_token_node_id, ast_node_ids[0], 'START_AST_PATH')
# Connects the end_token root node with the last node in the
# ast_nodes subtree.
graph_to_output_example.add_edge(
end_token_node_id, ast_node_ids[-1], 'END_AST_PATH')
# Add a root AST path node representing the AST path.
graph_to_output_example.add_node(
cur_node_id, 'ROOT_AST_PATH', 'ROOT_AST_PATH')
ast_path_root_node_id = cur_node_id
ast_path_root_node_ids.append(ast_path_root_node_id)
cur_node_id += 1
# Connects the root AST path node with the start_token and end_token
# subtree.
graph_to_output_example.add_edge(
ast_path_root_node_id, start_token_node_id, 'START_TOKEN')
graph_to_output_example.add_edge(
ast_path_root_node_id, end_token_node_id, 'END_TOKEN')
# Connects the root AST path node with all nodes in the ast_nodes subtree.
for node_id in ast_node_ids:
graph_to_output_example.add_edge(ast_path_root_node_id, node_id,
'AST_NODE')
# Connects the code2seq root node with all AST path root node.
for ast_path_root_node_id in ast_path_root_node_ids:
graph_to_output_example.add_edge(c2c_root_node_id, ast_path_root_node_id,
'AST_PATH')
for subtoken in target_label.split('|'):
graph_to_output_example.add_token_output(subtoken)
for transformation_fn in self.transformation_funcs:
graph_to_output_example = transformation_fn(graph_to_output_example)
if not graph_to_output_example.check_if_valid():
raise GraphToOutputExampleNotValidError(
'Invalid GraphToOutputExample found {}'.format(
graph_to_output_example))
for filter_fn in self.filter_funcs:
if not filter_fn(graph_to_output_example):
graph_to_output_example = None
break
return {'split': split, 'GraphToOutputExample': graph_to_output_example}
class C2SExtractor(beam.DoFn):
"""Class to read the code2seq dataset."""
def __init__(self, random_split_fn, use_random_split):
self.random_split_fn = random_split_fn
self.use_random_split = use_random_split
def _read_data(self, file_path):
"""Read and parse the code2seq raw data file.
Each line in the code2seq raw data file has the following format:
'<token> <token>,<node1>,<node2>,<token> <token>,<node3>,<token>'
The first token is the function name. The rest are the AST paths, separated
with a whitespace.
Args:
file_path: Path to a code2seq data file.
Yields:
A tuple of the function name, and a list of AST paths.
"""
with open(file_path) as f:
for line in f:
fields = line.rstrip().split(' ')
# The subtokens are still separated by '|', we handle them
# together in self.raw_data_to_graph_to_output_example()
target_label = fields[0]
ast_paths = []
for field in fields[1:]:
if field:
# The subtokens are still separated by '|', we handle them
# together in self.raw_data_to_graph_to_output_example()
ast_paths.append(field.split(','))
yield target_label, ast_paths
def _get_split(self, file_path):
"""Get the data split based on the filename suffix."""
if file_path.endswith('train.c2s'):
return constants.TRAIN_SPLIT_NAME
elif file_path.endswith('val.c2s'):
return constants.VALIDATION_SPLIT_NAME
else:
return constants.TEST_SPLIT_NAME
def process(self, file_path):
split = self._get_split(file_path)
for target_label, ast_paths in self._read_data(file_path):
yield {
'split': self.random_split_fn() if self.use_random_split else split,
'target_label': target_label,
'ast_paths': ast_paths
}
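# Illustrative sketch (not part of the original module): how one raw line of
# a *.c2s file, in the whitespace/comma-separated format documented in
# C2SExtractor._read_data, decomposes into the target label and AST paths.
# The token and AST-node names below are made up for the example.
#
#   line = 'get|name name,Nm0|Mth|Nm1,value name,Nm0|Mth|Ret,get|value'
#   target_label == 'get|name'
#   ast_paths == [['name', 'Nm0|Mth|Nm1', 'value'],
#                 ['name', 'Nm0|Mth|Ret', 'get|value']]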
|
google-research/plur
|
plur/stage_1/code2seq_dataset.py
|
Python
|
apache-2.0
| 16,900 | 0.006154 |
#!/usr/bin/env python3
import asyncio
import pq
class Countdown(pq.Ahsm):
def __init__(self, count=3):
super().__init__(Countdown.initial)
self.count = count
@pq.Hsm.state
def initial(me, event):
print("initial")
me.te = pq.TimeEvent("TIME_TICK")
return me.tran(me, Countdown.counting)
@pq.Hsm.state
def counting(me, event):
sig = event.signal
if sig == pq.Signal.ENTRY:
print("counting")
me.te.postIn(me, 1.0)
return me.handled(me, event)
elif sig == pq.Signal.TIME_TICK:
print(me.count)
if me.count == 0:
return me.tran(me, Countdown.done)
else:
me.count -= 1
me.te.postIn(me, 1.0)
return me.handled(me, event)
return me.super(me, me.top)
@pq.Hsm.state
def done(me, event):
sig = event.signal
if sig == pq.Signal.ENTRY:
print("done")
pq.Framework.stop()
return me.handled(me, event)
return me.super(me, me.top)
if __name__ == "__main__":
sl = Countdown(10)
sl.start(0)
loop = asyncio.get_event_loop()
loop.run_forever()
loop.close()
|
dwhall/pq
|
examples/countdown.py
|
Python
|
mit
| 1,269 | 0.002364 |
# -*- coding:utf-8 -*-
# @author xupingmao
# @since 2021/12/05 11:25:18
# @modified 2022/01/24 14:47:38
# @filename dbutil_sortedset.py
"""【待实现】有序集合,用于各种需要排名的场景,比如
- 最近编辑的笔记
- 访问次数最多的笔记
如果使用了LdbTable的索引功能,其实就不需要这个了
"""
from xutils.dbutil_base import *
from xutils.dbutil_hash import LdbHashTable
register_table("_rank", "排名表")
class RankTable:
def __init__(self, table_name, user_name = None):
check_table_name(table_name)
self.table_name = table_name
self.prefix = "_rank:" + table_name
if user_name != None and user_name != "":
self.prefix += ":" + user_name
if self.prefix[-1] != ":":
self.prefix += ":"
def _format_score(self, score):
if score is None:
return "$"
if isinstance(score, int):
return "%020d" % score
if isinstance(score, str):
return "%020s" % score
raise Exception("_format_score: unsupported score (%r)" % score)
def put(self, member, score, batch = None):
score_str = self._format_score(score)
        key = self.prefix + score_str + ":" + member
if batch != None:
batch.put(key, member)
else:
put(key, member)
def delete(self, member, score, batch = None):
score_str = self._format_score(score)
        key = self.prefix + score_str + ":" + member
if batch != None:
batch.delete(key)
else:
delete(key)
def list(self, offset = 0, limit = 10, reverse = False):
return prefix_list(self.prefix, offset = offset,
limit = limit, reverse = reverse)
class LdbSortedSet:
def __init__(self, table_name, user_name = None, key_name = "_key"):
        # mapping from member key to score value
self.member_dict = LdbHashTable(table_name, user_name)
        # ranking of members by score
self.rank = RankTable(table_name, user_name)
def put(self, member, score):
"""设置成员分值"""
with get_write_lock(member):
batch = create_write_batch()
old_score = self.member_dict.get(member)
self.member_dict.put(member, score, batch = batch)
if old_score != score:
self.rank.delete(member, old_score, batch = batch)
self.rank.put(member, score, batch = batch)
commit_write_batch(batch)
def get(self, member):
return self.member_dict.get(member)
def delete(self, member):
with get_write_lock(member):
batch = create_write_batch()
old_score = self.member_dict.get(member)
if old_score != None:
self.member_dict.delete(member, batch = batch)
self.rank.delete(member, old_score, batch = batch)
commit_write_batch(batch)
def list_by_score(self, *args, **kw):
result = []
for member in self.rank.list(*args, **kw):
item = (member, self.get(member))
result.append(item)
return result
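# Illustrative usage sketch (not part of the original module); the table name
# and values are made up, and this assumes the underlying leveldb-backed
# store has been initialized and the table registered elsewhere in xnote.
#
#   visits = LdbSortedSet("note_visit", user_name = "demo")
#   visits.put("note-1", 3)
#   visits.put("note-2", 10)
#   visits.get("note-2")               # -> 10
#   visits.list_by_score(limit = 10)   # -> [("note-1", 3), ("note-2", 10)]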
|
xupingmao/xnote
|
xutils/dbutil_sortedset.py
|
Python
|
gpl-3.0
| 3,154 | 0.013316 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bar.ui'
#
# Created: Thu Aug 11 10:41:59 2011
# by: PyQt4 UI code generator 4.8.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_BarConfigDialog(object):
def setupUi(self, BarConfigDialog):
BarConfigDialog.setObjectName(_fromUtf8("BarConfigDialog"))
BarConfigDialog.resize(341, 241)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(BarConfigDialog.sizePolicy().hasHeightForWidth())
BarConfigDialog.setSizePolicy(sizePolicy)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/icons/programIcon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
BarConfigDialog.setWindowIcon(icon)
self.verticalLayout_4 = QtGui.QVBoxLayout(BarConfigDialog)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.lblFieldToPlot = QtGui.QLabel(BarConfigDialog)
self.lblFieldToPlot.setObjectName(_fromUtf8("lblFieldToPlot"))
self.horizontalLayout.addWidget(self.lblFieldToPlot)
self.cboFieldToPlot = QtGui.QComboBox(BarConfigDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cboFieldToPlot.sizePolicy().hasHeightForWidth())
self.cboFieldToPlot.setSizePolicy(sizePolicy)
self.cboFieldToPlot.setObjectName(_fromUtf8("cboFieldToPlot"))
self.horizontalLayout.addWidget(self.cboFieldToPlot)
self.chkSort = QtGui.QCheckBox(BarConfigDialog)
self.chkSort.setObjectName(_fromUtf8("chkSort"))
self.horizontalLayout.addWidget(self.chkSort)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.groupBox_3 = QtGui.QGroupBox(BarConfigDialog)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lblFigureWidth = QtGui.QLabel(self.groupBox_3)
self.lblFigureWidth.setObjectName(_fromUtf8("lblFigureWidth"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblFigureWidth)
self.spinFigWidth = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigWidth.setDecimals(2)
self.spinFigWidth.setMinimum(2.0)
self.spinFigWidth.setMaximum(20.0)
self.spinFigWidth.setSingleStep(0.5)
self.spinFigWidth.setProperty(_fromUtf8("value"), 8.5)
self.spinFigWidth.setObjectName(_fromUtf8("spinFigWidth"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.spinFigWidth)
self.lblFigureHeight = QtGui.QLabel(self.groupBox_3)
self.lblFigureHeight.setObjectName(_fromUtf8("lblFigureHeight"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblFigureHeight)
self.spinFigRowHeight = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigRowHeight.setMinimum(0.1)
self.spinFigRowHeight.setMaximum(1.0)
self.spinFigRowHeight.setSingleStep(0.05)
self.spinFigRowHeight.setProperty(_fromUtf8("value"), 0.25)
self.spinFigRowHeight.setObjectName(_fromUtf8("spinFigRowHeight"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.spinFigRowHeight)
self.verticalLayout_3.addLayout(self.formLayout_2)
self.horizontalLayout_2.addWidget(self.groupBox_3)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.groupBox = QtGui.QGroupBox(BarConfigDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.radioLegendPosUpperLeft = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosUpperLeft.setObjectName(_fromUtf8("radioLegendPosUpperLeft"))
self.gridLayout.addWidget(self.radioLegendPosUpperLeft, 0, 2, 1, 1)
self.radioLegendPosCentreLeft = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosCentreLeft.setObjectName(_fromUtf8("radioLegendPosCentreLeft"))
self.gridLayout.addWidget(self.radioLegendPosCentreLeft, 0, 3, 1, 1)
self.radioLegendPosLowerLeft = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosLowerLeft.setChecked(False)
self.radioLegendPosLowerLeft.setObjectName(_fromUtf8("radioLegendPosLowerLeft"))
self.gridLayout.addWidget(self.radioLegendPosLowerLeft, 0, 4, 1, 1)
self.radioLegendPosUpperRight = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosUpperRight.setObjectName(_fromUtf8("radioLegendPosUpperRight"))
self.gridLayout.addWidget(self.radioLegendPosUpperRight, 1, 2, 1, 1)
self.radioLegendPosCentreRight = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosCentreRight.setObjectName(_fromUtf8("radioLegendPosCentreRight"))
self.gridLayout.addWidget(self.radioLegendPosCentreRight, 1, 3, 1, 1)
self.radioLegendPosLowerRight = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosLowerRight.setObjectName(_fromUtf8("radioLegendPosLowerRight"))
self.gridLayout.addWidget(self.radioLegendPosLowerRight, 1, 4, 1, 1)
self.radioLegendPosBest = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosBest.setChecked(True)
self.radioLegendPosBest.setObjectName(_fromUtf8("radioLegendPosBest"))
self.gridLayout.addWidget(self.radioLegendPosBest, 1, 1, 1, 1)
self.radioLegendPosNone = QtGui.QRadioButton(self.groupBox)
self.radioLegendPosNone.setObjectName(_fromUtf8("radioLegendPosNone"))
self.gridLayout.addWidget(self.radioLegendPosNone, 0, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.verticalLayout_4.addWidget(self.groupBox)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem = QtGui.QSpacerItem(100, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(BarConfigDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.horizontalLayout_3.addWidget(self.buttonBox)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.retranslateUi(BarConfigDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), BarConfigDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), BarConfigDialog.reject)
QtCore.QMetaObject.connectSlotsByName(BarConfigDialog)
def retranslateUi(self, BarConfigDialog):
BarConfigDialog.setWindowTitle(QtGui.QApplication.translate("BarConfigDialog", "Bar plot", None, QtGui.QApplication.UnicodeUTF8))
self.lblFieldToPlot.setText(QtGui.QApplication.translate("BarConfigDialog", "Field to plot:", None, QtGui.QApplication.UnicodeUTF8))
self.chkSort.setText(QtGui.QApplication.translate("BarConfigDialog", "Sort values", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("BarConfigDialog", "Figure size", None, QtGui.QApplication.UnicodeUTF8))
self.lblFigureWidth.setText(QtGui.QApplication.translate("BarConfigDialog", "Width:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFigureHeight.setText(QtGui.QApplication.translate("BarConfigDialog", "Row height:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("BarConfigDialog", "Legend position", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosUpperLeft.setText(QtGui.QApplication.translate("BarConfigDialog", "Upper left", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosCentreLeft.setText(QtGui.QApplication.translate("BarConfigDialog", "Centre left", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosLowerLeft.setText(QtGui.QApplication.translate("BarConfigDialog", "Lower left", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosUpperRight.setText(QtGui.QApplication.translate("BarConfigDialog", "Upper right", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosCentreRight.setText(QtGui.QApplication.translate("BarConfigDialog", "Centre right", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosLowerRight.setText(QtGui.QApplication.translate("BarConfigDialog", "Lower right", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosBest.setText(QtGui.QApplication.translate("BarConfigDialog", "Best", None, QtGui.QApplication.UnicodeUTF8))
self.radioLegendPosNone.setText(QtGui.QApplication.translate("BarConfigDialog", "None", None, QtGui.QApplication.UnicodeUTF8))
|
dparks1134/STAMP
|
stamp/plugins/samples/plots/configGUI/barUI.py
|
Python
|
gpl-3.0
| 9,975 | 0.00381 |
from src.module.deploy_utils import parse_war_path
from commands import getoutput
from log import LOG
import utility
def invoke(fingerengine, fingerprint, deployer):
"""
"""
if fingerengine.service in ["jboss", "tomcat"]:
return invoke_war(fingerengine, fingerprint)
elif fingerengine.service in ["coldfusion"]:
return invoke_cf(fingerengine, fingerprint, deployer)
else:
utility.Msg("Platform %s does not support --invoke" %
fingerengine.options.remote_service, LOG.ERROR)
def invoke_war(fingerengine, fingerprint):
""" Invoke a deployed WAR file on the remote server.
This uses unzip because Python's zip module isn't very portable or
fault tolerant; i.e. it fails to parse msfpayload-generated WARs, though
this is a fault of metasploit, not the Python module.
"""
dfile = fingerengine.options.deploy
jsp = getoutput("unzip -l %s | grep jsp" % dfile).split(' ')[-1]
if jsp == '':
utility.Msg("Failed to find a JSP in the deployed WAR", LOG.DEBUG)
return
else:
utility.Msg("Using JSP {0} from {1} to invoke".format(jsp, dfile), LOG.DEBUG)
url = "http://{0}:{1}/{2}/{3}".format(fingerengine.options.ip,
fingerprint.port,
parse_war_path(dfile),
jsp)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(parse_war_path(dfile, True)),
LOG.ERROR)
def invoke_cf(fingerengine, fingerprint, deployer):
"""
"""
dfile = parse_war_path(fingerengine.options.deploy, True)
if fingerprint.version in ["10.0"]:
# deployments to 10 require us to trigger a 404
url = "http://{0}:{1}/CFIDE/ad123.cfm".format(fingerengine.options.ip,
fingerprint.port)
elif fingerprint.version in ["8.0"] and "fck_editor" in deployer.__name__:
# invoke a shell via FCKeditor deployer
url = "http://{0}:{1}/userfiles/file/{2}".format(fingerengine.options.ip,
fingerprint.port,
dfile)
else:
url = "http://{0}:{1}/CFIDE/{2}".format(fingerengine.options.ip,
fingerprint.port,
dfile)
if _invoke(url):
utility.Msg("{0} invoked at {1}".format(dfile, fingerengine.options.ip))
else:
utility.Msg("Failed to invoke {0}".format(dfile), LOG.ERROR)
def _invoke(url):
""" Make the request
"""
status = False
try:
response = utility.requests_get(url)
if response.status_code == 200:
status = True
except Exception, e:
utility.Msg("Failed to invoke payload: %s" % e, LOG.ERROR)
status = False
return status
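# Hedged usage sketch (not part of the original module): the URL shapes that
# invoke_war() and invoke_cf() build and hand to _invoke(). Host, port and
# file names below are illustrative placeholders only.
def _example_invoke_urls():
    war_url = "http://{0}:{1}/{2}/{3}".format("10.0.0.5", 8080, "shell", "cmd.jsp")
    cf_url = "http://{0}:{1}/CFIDE/{2}".format("10.0.0.5", 80, "shell.cfm")
    return war_url, cf_url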
|
breenmachine/clusterd
|
src/module/invoke_payload.py
|
Python
|
mit
| 3,120 | 0.003526 |
#!/usr/bin/env python
import sys
import os
import threading
import traceback
import json
import multiprocessing
import subprocess
import http
import html
import urllib
import argparse
from .aserver import AsyncCache, AsyncTCPServer, AsyncHTTPRequestHandler
from ..fpbench import fpcparser
from ..arithmetic import native, np
from ..arithmetic import softfloat, softposit
from ..arithmetic import ieee754, posit
from ..arithmetic import sinking
from ..arithmetic import canonicalize
from ..arithmetic import evalctx
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, 'index.html'), 'rb') as f:
index = f.read()
with open(os.path.join(here, 'evaluate.html'), 'rb') as f:
evaluate_page = f.read()
with open(os.path.join(here, 'translate.html'), 'rb') as f:
translate_page = f.read()
with open(os.path.join(here, 'titanic.css'), 'rb') as f:
css = f.read()
with open(os.path.join(here, 'titanfp.min.js'), 'rb') as f:
bundle = f.read()
with open(os.path.join(here, '../../../www/favicon.ico'), 'rb') as f:
favicon = f.read()
with open(os.path.join(here, '../../../www/piceberg_round.png'), 'rb') as f:
logo = f.read()
fpbench_root = '/home/bill/private/research/origin-FPBench'
fpbench_tools = os.path.join(fpbench_root, 'tools')
fpbench_benchmarks = os.path.join(fpbench_root, 'benchmarks')
def run_tool(toolname, core, *args):
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, toolname), *args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate(input=core.sexp.encode('utf-8'))
success = True
retval = tool.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(tool.args), retval),
file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
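# Hedged usage sketch (not part of the original file): run_tool() feeds the
# FPCore s-expression to a Racket tool over stdin and returns (success, text);
# `core` is assumed to be an object exposing a `.sexp` string, as used
# throughout this module.
def _example_run_tool(core):
    ok, c_source = run_tool('core2c.rkt', core)
    return c_source if ok else None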
def filter_cores(*args, benchmark_dir = fpbench_benchmarks):
if not os.path.isdir(benchmark_dir):
raise ValueError('{}: not a directory'.format(benchmark_dir))
names = os.listdir(benchmark_dir)
benchmark_files = [name for name in names
if name.lower().endswith('.fpcore')
and os.path.isfile(os.path.join(benchmark_dir, name))]
cat = subprocess.Popen(
cwd=benchmark_dir,
args=['cat', *benchmark_files],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
cat.stdin.close()
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, 'filter.rkt'), *args],
stdin=cat.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate()
# cleanup
success = True
for proc in [cat, tool]:
retval = proc.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(proc.args), retval),
file=sys.stderr, flush=True)
cat_stderr_data = cat.stderr.read()
cat.stderr.close()
if cat_stderr_data:
print(cat_stderr_data, file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
def demo_tool(success, output):
if success:
return output
else:
return 'Error - tool subprocess returned nonzero value'
def demo_arith(evaluator, arguments, core, ctx=None):
if arguments is None:
try:
return str(evaluator(core))
except Exception:
print('Exception in FPCore evaluation\n evaluator={}\n args={}\n core={}'
.format(repr(evaluator), repr(arguments), core.sexp))
traceback.print_exc()
return 'Error evaluating FPCore.'
else:
inputs = arguments.strip().split()
if len(inputs) != len(core.inputs):
return 'Error - wrong number of arguments (core expects {:d})'.format(len(core.inputs))
try:
return str(evaluator(core, inputs, ctx))
except Exception:
print('Exception in FPCore evaluation\n evaluator={}\n args={}\n core={}'
.format(repr(evaluator), repr(arguments), core.sexp))
traceback.print_exc()
return 'Error evaluating FPCore.'
class RaisingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError('unable to parse inputs')
DEFAULT_PROPAGATE = {'precision', 'round', 'math-library'}
DEFAULT_RECURSE = {'pre', 'spec'}
def parse_canon_args(args):
parser = RaisingArgumentParser(add_help=False)
parser.add_argument('--default', action='store_true')
parser.add_argument('--recurse', type=str, nargs='*')
parser.add_argument('--propagate', type=str, nargs='*')
ns = parser.parse_args(args.strip().split())
if ns.recurse is None and ns.propagate is None:
return DEFAULT_RECURSE, DEFAULT_PROPAGATE
if ns.recurse is None:
recurse = set()
else:
recurse = set(ns.recurse)
if ns.propagate is None:
propagate = set()
else:
propagate = set(ns.propagate)
if ns.default:
recurse.update(DEFAULT_RECURSE)
propagate.update(DEFAULT_PROPAGATE)
return recurse, propagate
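# Worked example (illustrative, not part of the original file):
# parse_canon_args("--default --recurse pre") returns
# recurse == {'pre', 'spec'} and propagate == {'precision', 'round', 'math-library'},
# because --default merges the default sets into whatever was given explicitly.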
def demo_canon(evaluator, arguments, core, use_prop=False):
try:
recurse, propagate = parse_canon_args(arguments)
except Exception:
print('Exception parsing arguments for canonicalizer: {}'.format(repr(arguments)))
traceback.print_exc()
return 'Error parsing arguments.'
try:
if use_prop:
return evaluator(core, recurse=recurse, propagate=propagate).sexp
else:
return evaluator(core, recurse=recurse).sexp
except Exception:
print('Exception in FPCore translation\n translator={}\n recurse={}\n propagate={}\n use_prop={}\n core={}'
.format(repr(evaluator), repr(recurse), repr(propagate), repr(use_prop), core.sexp))
traceback.print_exc()
return 'Error translating FPCore.'
class TitanfpHTTPRequestHandler(AsyncHTTPRequestHandler):
def import_core_from_query(self, content, query):
qd = urllib.parse.parse_qs(query)
return content.decode('utf-8').format(qd.get('core', [''])[-1]).encode('utf-8')
def construct_content(self, data):
pr = self.translate_path()
if pr.path == '/titanfp.min.js':
response = http.server.HTTPStatus.OK
msg = None
headers = (
('Content-Type', 'text/javascript'),
)
content = bundle
elif pr.path == '/titanic.css':
response = http.server.HTTPStatus.OK
msg = None
headers = (
('Content-Type', 'text/css'),
)
content = css
else:
response = http.server.HTTPStatus.OK
msg = None
if data is None:
if pr.path == '/favicon.ico':
headers = (
('Content-Type', 'image/x-icon'),
)
content = favicon
elif pr.path == '/piceberg_round.png':
headers = (
('Content-Type', 'image/png'),
)
content = logo
# elif pr.path == '/evaluate':
else:
headers = (
('Content-Type', 'text/html'),
)
content = self.import_core_from_query(evaluate_page, pr.query)
# elif pr.path == '/translate':
# headers = (
# ('Content-Type', 'text/html'),
# )
# content = self.import_core_from_query(translate_page, pr.query)
# else:
# print(pr)
# headers = (
# ('Content-Type', 'text/html'),
# )
# content = index
else:
try:
payload = json.loads(data.decode('utf-8'))
except Exception as e:
print('Malformed data payload:\n{}'.format(repr(data)))
traceback.print_exc()
try:
core = fpcparser.compile(payload['core'])[0]
except Exception:
print('Exception parsing FPCore {}'.format(repr(payload['core'])))
traceback.print_exc()
core = None
output = 'Error - unable to parse FPCore'
try:
if core is not None:
backend = payload['backend']
if backend == 'sink':
ctx = ieee754.ieee_ctx(int(payload['w']), int(payload['p']))
output = demo_arith(sinking.Interpreter.interpret, payload['inputs'], core, ctx)
elif backend == 'ieee754':
ctx = ieee754.ieee_ctx(int(payload['w']), int(payload['p']))
output = demo_arith(ieee754.Interpreter.interpret, payload['inputs'], core, ctx)
elif backend == 'posit':
ctx = posit.posit_ctx(int(payload['es']), int(payload['nbits']))
output = demo_arith(posit.Interpreter.interpret, payload['inputs'], core, ctx)
elif backend == 'native':
output = demo_arith(native.Interpreter.interpret, payload['inputs'], core)
elif backend == 'np':
output = demo_arith(np.Interpreter.interpret, payload['inputs'], core)
elif backend == 'softfloat':
output = demo_arith(softfloat.Interpreter.interpret, payload['inputs'], core)
elif backend == 'softposit':
output = demo_arith(softposit.Interpreter.interpret, payload['inputs'], core)
elif backend == 'canonicalize':
output = demo_canon(canonicalize.Canonicalizer.translate, payload['inputs'], core, use_prop=True)
elif backend == 'condense':
output = demo_canon(canonicalize.Condenser.translate, payload['inputs'], core, use_prop=False)
elif backend == 'minimize':
output = demo_canon(canonicalize.Minimizer.translate, payload['inputs'], core, use_prop=False)
elif backend == 'fpcore':
inputs = payload['inputs'].strip().split()
if len(inputs) != len(core.inputs):
output = 'Error - wrong number of arguments (core expects {:d})'.format(len(core.inputs))
else:
output = demo_tool(*run_tool('fpcore.rkt', core, *inputs))
elif backend == 'core2c':
output = demo_tool(*run_tool('core2c.rkt', core))
elif backend == 'core2js':
output = demo_tool(*run_tool('core2js.rkt', core))
elif backend == 'core2smtlib2':
output = demo_tool(*run_tool('core2smtlib2.rkt', core))
# elif backend == 'filter':
# inputs = payload['inputs'].strip().split()
# output = demo_tool(*filter_cores(*inputs))
else:
output = 'Unknown backend ' + repr(backend)
except Exception as e:
print('Exception running backend\n payload={}'.format(repr(payload)))
traceback.print_exc()
output = 'Error running backend.'
headers = (
('Content-Type', 'text/plain'),
)
content = html.escape(str(output)).encode('utf-8')
return response, msg, headers, content
def run():
import argparse
ncores = os.cpu_count()
#default_pool_size = max(1, min(ncores - 1, (ncores // 2) + 1))
default_pool_size = 2
parser = argparse.ArgumentParser()
parser.add_argument('--cache', type=int, default=1,
help='number of requests to cache')
parser.add_argument('--workers', type=int, default=default_pool_size,
help='number of worker processes to run in parallel')
parser.add_argument('--host', type=str, default='localhost',
help='server host')
parser.add_argument('--port', type=int, default=8000,
help='server port')
args = parser.parse_args()
cache = AsyncCache(args.cache)
with multiprocessing.Pool(args.workers, maxtasksperchild=100) as pool:
class CustomHTTPRequestHandler(TitanfpHTTPRequestHandler):
the_cache = cache
the_pool = pool
print('caching {:d} requests'.format(args.cache))
print('{:d} worker processes'.format(args.workers))
with AsyncTCPServer((args.host, args.port,), CustomHTTPRequestHandler) as server:
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print('server on thread:', server_thread.name)
print('close stdin to stop.')
for line in sys.stdin:
pass
print('stdin closed, stopping.')
pool.close()
print('workers closing...')
pool.join()
print('workers joined successfully.')
server.shutdown()
print('goodbye!')
if __name__ == '__main__':
run()
|
billzorn/fpunreal
|
titanfp/web/old_webdemo.py
|
Python
|
mit
| 14,240 | 0.00302 |
from . import shim
tmpdir = None
def setUp():
global tmpdir
tmpdir = shim.setup_shim_for('kubectl')
def tearDown():
global tmpdir
shim.teardown_shim_dir(tmpdir)
|
SpectoLabs/myna
|
contrib/python-myna/myna/__init__.py
|
Python
|
apache-2.0
| 180 | 0.011111 |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .doc import selfdoc # noqa: F401
from .elaborate import elaborate # noqa: F401
from .generate import generate # noqa: F401
from .generate_tb import generate_tb # noqa: F401
from .item import Edge, Node, NodeType # noqa: F401
from .validate import validate # noqa: F401
from .xbar import Xbar # noqa: F401
|
lowRISC/opentitan
|
util/tlgen/__init__.py
|
Python
|
apache-2.0
| 465 | 0 |
import boardgame as bg
class Card(bg.GamePiece):
pass
|
tcstewar/boardgame
|
boardgame/card.py
|
Python
|
gpl-2.0
| 59 | 0.016949 |
import pytest
from linguee_api.const import LANGUAGE_CODE, LANGUAGES
from linguee_api.linguee_client import LingueeClient
from linguee_api.models import SearchResult
@pytest.mark.asyncio
async def test_linguee_client_should_redirect_on_not_found(
linguee_client: LingueeClient,
):
search_result = await linguee_client.process_search_result(
query="constibado", src="pt", dst="en", guess_direction=False
)
assert search_result.query == "constipado"
@pytest.mark.asyncio
@pytest.mark.parametrize("lang", list(LANGUAGES.keys()))
async def test_linguee_client_should_process_test_requests(
linguee_client: LingueeClient,
lang: LANGUAGE_CODE,
):
search_result = await linguee_client.process_search_result(
query="test", src="en", dst=lang, guess_direction=False
)
assert isinstance(search_result, SearchResult)
|
imankulov/linguee-api
|
tests/test_linguee_client.py
|
Python
|
mit
| 863 | 0 |
import discord
from discord.ext import commands
import time
import datetime
import pytz
class GameTime(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def time(self, ctx):
"""Displays current game time."""
locationName = self.bot.db.get_val("ServerInfo", "")
print(type(locationName))
print(locationName['CityName'])
embed = discord.Embed(title="Current time in {}".format(locationName['CityName']),description=get_gametime())
await ctx.send(embed=embed)
        await ctx.message.delete()
def suffix(d):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
def get_rawtime():
return datetime.datetime.now(pytz.timezone('UTC'))
def get_gametime():
months = [
"Hammer",
"Alturiak",
"Ches",
"Tarsakh",
"Mirtul",
"Kythorn",
"Flamerule",
"Eleasis",
"Eleint",
"Marpenoth",
"Uktar",
"Nightal"]
    aDate = datetime.datetime(2020, 10, 18, tzinfo=pytz.timezone('UTC'))
    bDate = datetime.datetime.now(pytz.timezone('UTC'))
    delta = bDate - aDate
    gametime = datetime.datetime(2020, 10, 18, bDate.hour, bDate.minute, bDate.second) + datetime.timedelta(days=delta.days*3) + (datetime.timedelta(days=(bDate.hour//8-2)))
if gametime.hour == 0:
gametime_hour = 12
time_decor = "AM"
else:
gametime_hour = gametime.hour-12 if gametime.hour > 12 else gametime.hour
time_decor = "PM" if gametime.hour > 12 else "AM"
gametime_minute = "0{}".format(gametime.minute) if gametime.minute < 10 else gametime.minute
return "{}:{} {} UTC | {}{} of {}".format(gametime_hour, gametime_minute, time_decor, gametime.day, suffix(gametime.day), months[gametime.month-1])
def setup(bot):
bot.add_cog(GameTime(bot))
|
Eylesis/Botfriend
|
Cogs/GameTime.py
|
Python
|
mit
| 1,883 | 0.012746 |
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', '-s', 'tests']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='abenity',
packages=['abenity'],
version='0.0.4',
description='Abenity API client',
long_description='A Python library for using the Abenity API.',
url='https://github.com/casbeebc/abenity-python',
author='Brett Casbeer',
author_email='brett.casbeer@gmail.com',
license='MIT',
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
setup_requires=['setuptools>=17.1'],
install_requires=['requests==2.20.0',
'pycryptodome==3.6.6',
'six==1.10.0'],
extras_require={'testing': ['pytest']},
tests_require=['pytest'],
)
|
casbeebc/abenity-python
|
setup.py
|
Python
|
mit
| 1,394 | 0 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <nils@we155.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = Unified Diagnostic Service (UDS)
# scapy.contrib.status = loads
import struct
import time
from itertools import product
from scapy.fields import ByteEnumField, StrField, ConditionalField, \
BitEnumField, BitField, XByteField, FieldListField, \
XShortField, X3BytesField, XIntField, ByteField, \
ShortField, ObservableDict, XShortEnumField, XByteEnumField, StrLenField, \
FieldLenField
from scapy.packet import Packet, bind_layers, NoPayload
from scapy.config import conf
from scapy.error import log_loading
from scapy.utils import PeriodicSenderThread
from scapy.contrib.isotp import ISOTP
from scapy.compat import Dict, Union
"""
UDS
"""
try:
if conf.contribs['UDS']['treat-response-pending-as-answer']:
pass
except KeyError:
log_loading.info("Specify \"conf.contribs['UDS'] = "
"{'treat-response-pending-as-answer': True}\" to treat "
"a negative response 'requestCorrectlyReceived-"
"ResponsePending' as answer of a request. \n"
"The default value is False.")
conf.contribs['UDS'] = {'treat-response-pending-as-answer': False}
class UDS(ISOTP):
services = ObservableDict(
{0x10: 'DiagnosticSessionControl',
0x11: 'ECUReset',
0x14: 'ClearDiagnosticInformation',
0x19: 'ReadDTCInformation',
0x22: 'ReadDataByIdentifier',
0x23: 'ReadMemoryByAddress',
0x24: 'ReadScalingDataByIdentifier',
0x27: 'SecurityAccess',
0x28: 'CommunicationControl',
0x2A: 'ReadDataPeriodicIdentifier',
0x2C: 'DynamicallyDefineDataIdentifier',
0x2E: 'WriteDataByIdentifier',
0x2F: 'InputOutputControlByIdentifier',
0x31: 'RoutineControl',
0x34: 'RequestDownload',
0x35: 'RequestUpload',
0x36: 'TransferData',
0x37: 'RequestTransferExit',
0x38: 'RequestFileTransfer',
0x3D: 'WriteMemoryByAddress',
0x3E: 'TesterPresent',
0x50: 'DiagnosticSessionControlPositiveResponse',
0x51: 'ECUResetPositiveResponse',
0x54: 'ClearDiagnosticInformationPositiveResponse',
0x59: 'ReadDTCInformationPositiveResponse',
0x62: 'ReadDataByIdentifierPositiveResponse',
0x63: 'ReadMemoryByAddressPositiveResponse',
0x64: 'ReadScalingDataByIdentifierPositiveResponse',
0x67: 'SecurityAccessPositiveResponse',
0x68: 'CommunicationControlPositiveResponse',
0x6A: 'ReadDataPeriodicIdentifierPositiveResponse',
0x6C: 'DynamicallyDefineDataIdentifierPositiveResponse',
0x6E: 'WriteDataByIdentifierPositiveResponse',
0x6F: 'InputOutputControlByIdentifierPositiveResponse',
0x71: 'RoutineControlPositiveResponse',
0x74: 'RequestDownloadPositiveResponse',
0x75: 'RequestUploadPositiveResponse',
0x76: 'TransferDataPositiveResponse',
0x77: 'RequestTransferExitPositiveResponse',
0x78: 'RequestFileTransferPositiveResponse',
0x7D: 'WriteMemoryByAddressPositiveResponse',
0x7E: 'TesterPresentPositiveResponse',
0x83: 'AccessTimingParameter',
0x84: 'SecuredDataTransmission',
0x85: 'ControlDTCSetting',
0x86: 'ResponseOnEvent',
0x87: 'LinkControl',
0xC3: 'AccessTimingParameterPositiveResponse',
0xC4: 'SecuredDataTransmissionPositiveResponse',
0xC5: 'ControlDTCSettingPositiveResponse',
0xC6: 'ResponseOnEventPositiveResponse',
0xC7: 'LinkControlPositiveResponse',
0x7f: 'NegativeResponse'}) # type: Dict[int, str]
name = 'UDS'
fields_desc = [
XByteEnumField('service', 0, services)
]
def answers(self, other):
# type: (Union[UDS, Packet]) -> bool
if other.__class__ != self.__class__:
return False
if self.service == 0x7f:
return self.payload.answers(other)
if self.service == (other.service + 0x40):
if isinstance(self.payload, NoPayload) or \
isinstance(other.payload, NoPayload):
return len(self) <= len(other)
else:
return self.payload.answers(other.payload)
return False
def hashret(self):
# type: () -> bytes
if self.service == 0x7f:
return struct.pack('B', self.requestServiceId)
return struct.pack('B', self.service & ~0x40)
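# Hedged sketch (not part of the original module): the pairing rule above means
# a response whose service id equals the request id + 0x40 answers the request,
# e.g. for the DiagnosticSessionControl packets defined below:
#     req = UDS() / UDS_DSC(diagnosticSessionType=1)
#     resp = UDS() / UDS_DSCPR(diagnosticSessionType=1)
#     resp.answers(req)   # True: 0x50 == 0x10 + 0x40 and the session types match
# Negative responses (service 0x7f) instead defer to UDS_NR.answers().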
# ########################DSC###################################
class UDS_DSC(Packet):
diagnosticSessionTypes = ObservableDict({
0x00: 'ISOSAEReserved',
0x01: 'defaultSession',
0x02: 'programmingSession',
0x03: 'extendedDiagnosticSession',
0x04: 'safetySystemDiagnosticSession',
0x7F: 'ISOSAEReserved'})
name = 'DiagnosticSessionControl'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0, diagnosticSessionTypes)
]
bind_layers(UDS, UDS_DSC, service=0x10)
class UDS_DSCPR(Packet):
name = 'DiagnosticSessionControlPositiveResponse'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0,
UDS_DSC.diagnosticSessionTypes),
StrField('sessionParameterRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_DSC) and \
other.diagnosticSessionType == self.diagnosticSessionType
bind_layers(UDS, UDS_DSCPR, service=0x50)
# #########################ER###################################
class UDS_ER(Packet):
resetTypes = {
0x00: 'ISOSAEReserved',
0x01: 'hardReset',
0x02: 'keyOffOnReset',
0x03: 'softReset',
0x04: 'enableRapidPowerShutDown',
0x05: 'disableRapidPowerShutDown',
0x41: 'powerDown',
0x7F: 'ISOSAEReserved'}
name = 'ECUReset'
fields_desc = [
ByteEnumField('resetType', 0, resetTypes)
]
bind_layers(UDS, UDS_ER, service=0x11)
class UDS_ERPR(Packet):
name = 'ECUResetPositiveResponse'
fields_desc = [
ByteEnumField('resetType', 0, UDS_ER.resetTypes),
ConditionalField(ByteField('powerDownTime', 0),
lambda pkt: pkt.resetType == 0x04)
]
def answers(self, other):
return isinstance(other, UDS_ER)
bind_layers(UDS, UDS_ERPR, service=0x51)
# #########################SA###################################
class UDS_SA(Packet):
name = 'SecurityAccess'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securityAccessDataRecord', b""),
lambda pkt: pkt.securityAccessType % 2 == 1),
ConditionalField(StrField('securityKey', b""),
lambda pkt: pkt.securityAccessType % 2 == 0)
]
bind_layers(UDS, UDS_SA, service=0x27)
class UDS_SAPR(Packet):
name = 'SecurityAccessPositiveResponse'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securitySeed', b""),
lambda pkt: pkt.securityAccessType % 2 == 1),
]
def answers(self, other):
return isinstance(other, UDS_SA) \
and other.securityAccessType == self.securityAccessType
bind_layers(UDS, UDS_SAPR, service=0x67)
# #########################CC###################################
class UDS_CC(Packet):
controlTypes = {
0x00: 'enableRxAndTx',
0x01: 'enableRxAndDisableTx',
0x02: 'disableRxAndEnableTx',
0x03: 'disableRxAndTx'
}
name = 'CommunicationControl'
fields_desc = [
ByteEnumField('controlType', 0, controlTypes),
BitEnumField('communicationType0', 0, 2,
{0: 'ISOSAEReserved',
1: 'normalCommunicationMessages',
2: 'networkManagmentCommunicationMessages',
3: 'networkManagmentCommunicationMessages and '
'normalCommunicationMessages'}),
BitField('communicationType1', 0, 2),
BitEnumField('communicationType2', 0, 4,
{0: 'Disable/Enable specified communication Type',
1: 'Disable/Enable specific subnet',
2: 'Disable/Enable specific subnet',
3: 'Disable/Enable specific subnet',
4: 'Disable/Enable specific subnet',
5: 'Disable/Enable specific subnet',
6: 'Disable/Enable specific subnet',
7: 'Disable/Enable specific subnet',
8: 'Disable/Enable specific subnet',
9: 'Disable/Enable specific subnet',
10: 'Disable/Enable specific subnet',
11: 'Disable/Enable specific subnet',
12: 'Disable/Enable specific subnet',
13: 'Disable/Enable specific subnet',
14: 'Disable/Enable specific subnet',
15: 'Disable/Enable network'})
]
bind_layers(UDS, UDS_CC, service=0x28)
class UDS_CCPR(Packet):
name = 'CommunicationControlPositiveResponse'
fields_desc = [
ByteEnumField('controlType', 0, UDS_CC.controlTypes)
]
def answers(self, other):
return isinstance(other, UDS_CC) \
and other.controlType == self.controlType
bind_layers(UDS, UDS_CCPR, service=0x68)
# #########################TP###################################
class UDS_TP(Packet):
name = 'TesterPresent'
fields_desc = [
ByteField('subFunction', 0)
]
bind_layers(UDS, UDS_TP, service=0x3E)
class UDS_TPPR(Packet):
name = 'TesterPresentPositiveResponse'
fields_desc = [
ByteField('zeroSubFunction', 0)
]
def answers(self, other):
return isinstance(other, UDS_TP)
bind_layers(UDS, UDS_TPPR, service=0x7E)
# #########################ATP###################################
class UDS_ATP(Packet):
timingParameterAccessTypes = {
0: 'ISOSAEReserved',
1: 'readExtendedTimingParameterSet',
2: 'setTimingParametersToDefaultValues',
3: 'readCurrentlyActiveTimingParameters',
4: 'setTimingParametersToGivenValues'
}
name = 'AccessTimingParameter'
fields_desc = [
ByteEnumField('timingParameterAccessType', 0,
timingParameterAccessTypes),
ConditionalField(StrField('timingParameterRequestRecord', b""),
lambda pkt: pkt.timingParameterAccessType == 0x4)
]
bind_layers(UDS, UDS_ATP, service=0x83)
class UDS_ATPPR(Packet):
name = 'AccessTimingParameterPositiveResponse'
fields_desc = [
ByteEnumField('timingParameterAccessType', 0,
UDS_ATP.timingParameterAccessTypes),
ConditionalField(StrField('timingParameterResponseRecord', b""),
lambda pkt: pkt.timingParameterAccessType == 0x3)
]
def answers(self, other):
return isinstance(other, UDS_ATP) \
and other.timingParameterAccessType == \
self.timingParameterAccessType
bind_layers(UDS, UDS_ATPPR, service=0xC3)
# #########################SDT###################################
class UDS_SDT(Packet):
name = 'SecuredDataTransmission'
fields_desc = [
StrField('securityDataRequestRecord', b"")
]
bind_layers(UDS, UDS_SDT, service=0x84)
class UDS_SDTPR(Packet):
name = 'SecuredDataTransmissionPositiveResponse'
fields_desc = [
StrField('securityDataResponseRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_SDT)
bind_layers(UDS, UDS_SDTPR, service=0xC4)
# #########################CDTCS###################################
class UDS_CDTCS(Packet):
DTCSettingTypes = {
0: 'ISOSAEReserved',
1: 'on',
2: 'off'
}
name = 'ControlDTCSetting'
fields_desc = [
ByteEnumField('DTCSettingType', 0, DTCSettingTypes),
StrField('DTCSettingControlOptionRecord', b"")
]
bind_layers(UDS, UDS_CDTCS, service=0x85)
class UDS_CDTCSPR(Packet):
name = 'ControlDTCSettingPositiveResponse'
fields_desc = [
ByteEnumField('DTCSettingType', 0, UDS_CDTCS.DTCSettingTypes)
]
def answers(self, other):
return isinstance(other, UDS_CDTCS)
bind_layers(UDS, UDS_CDTCSPR, service=0xC5)
# #########################ROE###################################
# TODO: improve this protocol implementation
class UDS_ROE(Packet):
eventTypes = {
0: 'doNotStoreEvent',
1: 'storeEvent'
}
name = 'ResponseOnEvent'
fields_desc = [
ByteEnumField('eventType', 0, eventTypes),
ByteField('eventWindowTime', 0),
StrField('eventTypeRecord', b"")
]
bind_layers(UDS, UDS_ROE, service=0x86)
class UDS_ROEPR(Packet):
name = 'ResponseOnEventPositiveResponse'
fields_desc = [
ByteEnumField('eventType', 0, UDS_ROE.eventTypes),
ByteField('numberOfIdentifiedEvents', 0),
ByteField('eventWindowTime', 0),
StrField('eventTypeRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_ROE) \
and other.eventType == self.eventType
bind_layers(UDS, UDS_ROEPR, service=0xC6)
# #########################LC###################################
class UDS_LC(Packet):
linkControlTypes = {
0: 'ISOSAEReserved',
1: 'verifyBaudrateTransitionWithFixedBaudrate',
2: 'verifyBaudrateTransitionWithSpecificBaudrate',
3: 'transitionBaudrate'
}
name = 'LinkControl'
fields_desc = [
ByteEnumField('linkControlType', 0, linkControlTypes),
ConditionalField(ByteField('baudrateIdentifier', 0),
lambda pkt: pkt.linkControlType == 0x1),
ConditionalField(ByteField('baudrateHighByte', 0),
lambda pkt: pkt.linkControlType == 0x2),
ConditionalField(ByteField('baudrateMiddleByte', 0),
lambda pkt: pkt.linkControlType == 0x2),
ConditionalField(ByteField('baudrateLowByte', 0),
lambda pkt: pkt.linkControlType == 0x2)
]
bind_layers(UDS, UDS_LC, service=0x87)
class UDS_LCPR(Packet):
name = 'LinkControlPositiveResponse'
fields_desc = [
ByteEnumField('linkControlType', 0, UDS_LC.linkControlTypes)
]
def answers(self, other):
return isinstance(other, UDS_LC) \
and other.linkControlType == self.linkControlType
bind_layers(UDS, UDS_LCPR, service=0xC7)
# #########################RDBI###################################
class UDS_RDBI(Packet):
dataIdentifiers = ObservableDict()
name = 'ReadDataByIdentifier'
fields_desc = [
FieldListField("identifiers", None,
XShortEnumField('dataIdentifier', 0,
dataIdentifiers))
]
bind_layers(UDS, UDS_RDBI, service=0x22)
class UDS_RDBIPR(Packet):
name = 'ReadDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers),
]
def answers(self, other):
return isinstance(other, UDS_RDBI) \
and self.dataIdentifier in other.identifiers
bind_layers(UDS, UDS_RDBIPR, service=0x62)
# #########################RMBA###################################
class UDS_RMBA(Packet):
name = 'ReadMemoryByAddress'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4),
]
bind_layers(UDS, UDS_RMBA, service=0x23)
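# Hedged example (not part of the original module): reading 0x10 bytes at
# address 0x4000 with a 4-byte address field and a 1-byte size field uses the
# nibble-encoded length fields above:
#     UDS() / UDS_RMBA(memoryAddressLen=4, memoryAddress4=0x4000,
#                      memorySizeLen=1, memorySize1=0x10)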
class UDS_RMBAPR(Packet):
name = 'ReadMemoryByAddressPositiveResponse'
fields_desc = [
StrField('dataRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RMBA)
bind_layers(UDS, UDS_RMBAPR, service=0x63)
# #########################RSDBI###################################
class UDS_RSDBI(Packet):
name = 'ReadScalingDataByIdentifier'
dataIdentifiers = ObservableDict()
fields_desc = [
XShortEnumField('dataIdentifier', 0, dataIdentifiers)
]
bind_layers(UDS, UDS_RSDBI, service=0x24)
# TODO: Implement correct scaling here, instead of using just the dataRecord
class UDS_RSDBIPR(Packet):
name = 'ReadScalingDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0, UDS_RSDBI.dataIdentifiers),
ByteField('scalingByte', 0),
StrField('dataRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RSDBI) \
and other.dataIdentifier == self.dataIdentifier
bind_layers(UDS, UDS_RSDBIPR, service=0x64)
# #########################RDBPI###################################
class UDS_RDBPI(Packet):
transmissionModes = {
0: 'ISOSAEReserved',
1: 'sendAtSlowRate',
2: 'sendAtMediumRate',
3: 'sendAtFastRate',
4: 'stopSending'
}
name = 'ReadDataByPeriodicIdentifier'
fields_desc = [
ByteEnumField('transmissionMode', 0, transmissionModes),
ByteField('periodicDataIdentifier', 0),
StrField('furtherPeriodicDataIdentifier', b"", fmt="B")
]
bind_layers(UDS, UDS_RDBPI, service=0x2A)
# TODO: Implement correct scaling here, instead of using just the dataRecord
class UDS_RDBPIPR(Packet):
name = 'ReadDataByPeriodicIdentifierPositiveResponse'
fields_desc = [
ByteField('periodicDataIdentifier', 0),
StrField('dataRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RDBPI) \
and other.periodicDataIdentifier == self.periodicDataIdentifier
bind_layers(UDS, UDS_RDBPIPR, service=0x6A)
# #########################DDDI###################################
# TODO: Implement correct interpretation here,
# instead of using just the dataRecord
class UDS_DDDI(Packet):
name = 'DynamicallyDefineDataIdentifier'
subFunctions = {0x1: "defineByIdentifier",
0x2: "defineByMemoryAddress",
0x3: "clearDynamicallyDefinedDataIdentifier"}
fields_desc = [
ByteEnumField('subFunction', 0, subFunctions),
StrField('dataRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_DDDI, service=0x2C)
class UDS_DDDIPR(Packet):
name = 'DynamicallyDefineDataIdentifierPositiveResponse'
fields_desc = [
ByteEnumField('subFunction', 0, UDS_DDDI.subFunctions),
XShortField('dynamicallyDefinedDataIdentifier', 0)
]
def answers(self, other):
return isinstance(other, UDS_DDDI) \
and other.subFunction == self.subFunction
bind_layers(UDS, UDS_DDDIPR, service=0x6C)
# #########################WDBI###################################
class UDS_WDBI(Packet):
name = 'WriteDataByIdentifier'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers)
]
bind_layers(UDS, UDS_WDBI, service=0x2E)
class UDS_WDBIPR(Packet):
name = 'WriteDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers),
]
def answers(self, other):
return isinstance(other, UDS_WDBI) \
and other.dataIdentifier == self.dataIdentifier
bind_layers(UDS, UDS_WDBIPR, service=0x6E)
# #########################WMBA###################################
class UDS_WMBA(Packet):
name = 'WriteMemoryByAddress'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4),
StrField('dataRecord', b'', fmt="B"),
]
bind_layers(UDS, UDS_WMBA, service=0x3D)
class UDS_WMBAPR(Packet):
name = 'WriteMemoryByAddressPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
def answers(self, other):
return isinstance(other, UDS_WMBA) \
and other.memorySizeLen == self.memorySizeLen \
and other.memoryAddressLen == self.memoryAddressLen
bind_layers(UDS, UDS_WMBAPR, service=0x7D)
# #########################CDTCI###################################
class UDS_CDTCI(Packet):
name = 'ClearDiagnosticInformation'
fields_desc = [
ByteField('groupOfDTCHighByte', 0),
ByteField('groupOfDTCMiddleByte', 0),
ByteField('groupOfDTCLowByte', 0),
]
bind_layers(UDS, UDS_CDTCI, service=0x14)
class UDS_CDTCIPR(Packet):
name = 'ClearDiagnosticInformationPositiveResponse'
def answers(self, other):
return isinstance(other, UDS_CDTCI)
bind_layers(UDS, UDS_CDTCIPR, service=0x54)
# #########################RDTCI###################################
class UDS_RDTCI(Packet):
reportTypes = {
0: 'ISOSAEReserved',
1: 'reportNumberOfDTCByStatusMask',
2: 'reportDTCByStatusMask',
3: 'reportDTCSnapshotIdentification',
4: 'reportDTCSnapshotRecordByDTCNumber',
5: 'reportDTCSnapshotRecordByRecordNumber',
6: 'reportDTCExtendedDataRecordByDTCNumber',
7: 'reportNumberOfDTCBySeverityMaskRecord',
8: 'reportDTCBySeverityMaskRecord',
9: 'reportSeverityInformationOfDTC',
10: 'reportSupportedDTC',
11: 'reportFirstTestFailedDTC',
12: 'reportFirstConfirmedDTC',
13: 'reportMostRecentTestFailedDTC',
14: 'reportMostRecentConfirmedDTC',
15: 'reportMirrorMemoryDTCByStatusMask',
16: 'reportMirrorMemoryDTCExtendedDataRecordByDTCNumber',
17: 'reportNumberOfMirrorMemoryDTCByStatusMask',
18: 'reportNumberOfEmissionsRelatedOBDDTCByStatusMask',
19: 'reportEmissionsRelatedOBDDTCByStatusMask',
20: 'reportDTCFaultDetectionCounter',
21: 'reportDTCWithPermanentStatus'
}
name = 'ReadDTCInformation'
fields_desc = [
ByteEnumField('reportType', 0, reportTypes),
ConditionalField(ByteField('DTCSeverityMask', 0),
lambda pkt: pkt.reportType in [0x07, 0x08]),
ConditionalField(XByteField('DTCStatusMask', 0),
lambda pkt: pkt.reportType in [
0x01, 0x02, 0x07, 0x08, 0x0f, 0x11, 0x12, 0x13]),
ConditionalField(ByteField('DTCHighByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCMiddleByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCLowByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCSnapshotRecordNumber', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x5]),
ConditionalField(ByteField('DTCExtendedDataRecordNumber', 0),
lambda pkt: pkt.reportType in [0x6, 0x10])
]
bind_layers(UDS, UDS_RDTCI, service=0x19)
class UDS_RDTCIPR(Packet):
name = 'ReadDTCInformationPositiveResponse'
fields_desc = [
ByteEnumField('reportType', 0, UDS_RDTCI.reportTypes),
ConditionalField(XByteField('DTCStatusAvailabilityMask', 0),
lambda pkt: pkt.reportType in [0x01, 0x07, 0x11,
0x12, 0x02, 0x0A,
0x0B, 0x0C, 0x0D,
0x0E, 0x0F, 0x13,
0x15]),
ConditionalField(ByteEnumField('DTCFormatIdentifier', 0,
{0: 'ISO15031-6DTCFormat',
1: 'UDS-1DTCFormat',
2: 'SAEJ1939-73DTCFormat',
3: 'ISO11992-4DTCFormat'}),
lambda pkt: pkt.reportType in [0x01, 0x07,
0x11, 0x12]),
ConditionalField(ShortField('DTCCount', 0),
lambda pkt: pkt.reportType in [0x01, 0x07,
0x11, 0x12]),
ConditionalField(StrField('DTCAndStatusRecord', b""),
lambda pkt: pkt.reportType in [0x02, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E,
0x0F, 0x13, 0x15]),
ConditionalField(StrField('dataRecord', b""),
lambda pkt: pkt.reportType in [0x03, 0x04, 0x05,
0x06, 0x08, 0x09,
0x10, 0x14])
]
def answers(self, other):
return isinstance(other, UDS_RDTCI) \
and other.reportType == self.reportType
bind_layers(UDS, UDS_RDTCIPR, service=0x59)
# #########################RC###################################
class UDS_RC(Packet):
routineControlTypes = {
0: 'ISOSAEReserved',
1: 'startRoutine',
2: 'stopRoutine',
3: 'requestRoutineResults'
}
routineControlIdentifiers = ObservableDict()
name = 'RoutineControl'
fields_desc = [
ByteEnumField('routineControlType', 0, routineControlTypes),
XShortEnumField('routineIdentifier', 0, routineControlIdentifiers)
]
bind_layers(UDS, UDS_RC, service=0x31)
class UDS_RCPR(Packet):
name = 'RoutineControlPositiveResponse'
fields_desc = [
ByteEnumField('routineControlType', 0, UDS_RC.routineControlTypes),
XShortEnumField('routineIdentifier', 0,
UDS_RC.routineControlIdentifiers),
]
def answers(self, other):
return isinstance(other, UDS_RC) \
and other.routineControlType == self.routineControlType \
and other.routineIdentifier == self.routineIdentifier
bind_layers(UDS, UDS_RCPR, service=0x71)
# #########################RD###################################
class UDS_RD(Packet):
dataFormatIdentifiers = ObservableDict({
0: 'noCompressionNoEncryption'
})
name = 'RequestDownload'
fields_desc = [
ByteEnumField('dataFormatIdentifier', 0, dataFormatIdentifiers),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_RD, service=0x34)
class UDS_RDPR(Packet):
name = 'RequestDownloadPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('reserved', 0, 4),
StrField('maxNumberOfBlockLength', b"", fmt="B"),
]
def answers(self, other):
return isinstance(other, UDS_RD)
bind_layers(UDS, UDS_RDPR, service=0x74)
# #########################RU###################################
class UDS_RU(Packet):
name = 'RequestUpload'
fields_desc = [
ByteEnumField('dataFormatIdentifier', 0,
UDS_RD.dataFormatIdentifiers),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_RU, service=0x35)
class UDS_RUPR(Packet):
name = 'RequestUploadPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('reserved', 0, 4),
StrField('maxNumberOfBlockLength', b"", fmt="B"),
]
def answers(self, other):
return isinstance(other, UDS_RU)
bind_layers(UDS, UDS_RUPR, service=0x75)
# #########################TD###################################
class UDS_TD(Packet):
name = 'TransferData'
fields_desc = [
ByteField('blockSequenceCounter', 0),
StrField('transferRequestParameterRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_TD, service=0x36)
class UDS_TDPR(Packet):
name = 'TransferDataPositiveResponse'
fields_desc = [
ByteField('blockSequenceCounter', 0),
StrField('transferResponseParameterRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_TD) \
and other.blockSequenceCounter == self.blockSequenceCounter
bind_layers(UDS, UDS_TDPR, service=0x76)
# #########################RTE###################################
class UDS_RTE(Packet):
name = 'RequestTransferExit'
fields_desc = [
StrField('transferRequestParameterRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_RTE, service=0x37)
class UDS_RTEPR(Packet):
name = 'RequestTransferExitPositiveResponse'
fields_desc = [
StrField('transferResponseParameterRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RTE)
bind_layers(UDS, UDS_RTEPR, service=0x77)
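# Hedged sketch (not part of the original module) of how the download-related
# packets above are typically chained; `sock` is assumed to be an ISOTP socket
# and all numeric values are illustrative only:
#     sock.sr1(UDS() / UDS_RD(dataFormatIdentifier=0,
#                             memoryAddressLen=4, memoryAddress4=0x8000,
#                             memorySizeLen=4, memorySize4=0x1000))
#     sock.sr1(UDS() / UDS_TD(blockSequenceCounter=1,
#                             transferRequestParameterRecord=b'\x00' * 16))
#     sock.sr1(UDS() / UDS_RTE())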
# #########################RFT###################################
class UDS_RFT(Packet):
name = 'RequestFileTransfer'
modeOfOperations = {
0x00: "ISO/SAE Reserved",
0x01: "Add File",
0x02: "Delete File",
0x03: "Replace File",
0x04: "Read File",
0x05: "Read Directory"
}
@staticmethod
def _contains_file_size(packet):
return packet.modeOfOperation not in [2, 4, 5]
fields_desc = [
XByteEnumField('modeOfOperation', 0, modeOfOperations),
FieldLenField('filePathAndNameLength', 0,
length_of='filePathAndName', fmt='H'),
StrLenField('filePathAndName', b"",
length_from=lambda p: p.filePathAndNameLength),
ConditionalField(BitField('compressionMethod', 0, 4),
lambda p: p.modeOfOperation not in [2, 5]),
ConditionalField(BitField('encryptingMethod', 0, 4),
lambda p: p.modeOfOperation not in [2, 5]),
ConditionalField(FieldLenField('fileSizeParameterLength', 0, fmt="B",
length_of='fileSizeUnCompressed'),
lambda p: UDS_RFT._contains_file_size(p)),
ConditionalField(StrLenField('fileSizeUnCompressed', b"",
length_from=lambda p:
p.fileSizeParameterLength),
lambda p: UDS_RFT._contains_file_size(p)),
ConditionalField(StrLenField('fileSizeCompressed', b"",
length_from=lambda p:
p.fileSizeParameterLength),
lambda p: UDS_RFT._contains_file_size(p))
]
bind_layers(UDS, UDS_RFT, service=0x38)
class UDS_RFTPR(Packet):
name = 'RequestFileTransferPositiveResponse'
@staticmethod
def _contains_data_format_identifier(packet):
return packet.modeOfOperation != 0x02
fields_desc = [
XByteEnumField('modeOfOperation', 0, UDS_RFT.modeOfOperations),
ConditionalField(FieldLenField('lengthFormatIdentifier', 0,
length_of='maxNumberOfBlockLength',
fmt='B'),
lambda p: p.modeOfOperation != 2),
ConditionalField(StrLenField('maxNumberOfBlockLength', b"",
length_from=lambda p: p.lengthFormatIdentifier),
lambda p: p.modeOfOperation != 2),
ConditionalField(BitField('compressionMethod', 0, 4),
lambda p: p.modeOfOperation != 0x02),
ConditionalField(BitField('encryptingMethod', 0, 4),
lambda p: p.modeOfOperation != 0x02),
ConditionalField(FieldLenField('fileSizeOrDirInfoParameterLength', 0,
length_of='fileSizeUncompressedOrDirInfoLength'),
lambda p: p.modeOfOperation not in [1, 2, 3]),
ConditionalField(StrLenField('fileSizeUncompressedOrDirInfoLength',
b"",
length_from=lambda p:
p.fileSizeOrDirInfoParameterLength),
lambda p: p.modeOfOperation not in [1, 2, 3]),
ConditionalField(StrLenField('fileSizeCompressed', b"",
length_from=lambda p:
p.fileSizeOrDirInfoParameterLength),
lambda p: p.modeOfOperation not in [1, 2, 3, 5]),
]
def answers(self, other):
return isinstance(other, UDS_RFT)
bind_layers(UDS, UDS_RFTPR, service=0x78)
# #########################IOCBI###################################
class UDS_IOCBI(Packet):
name = 'InputOutputControlByIdentifier'
dataIdentifiers = ObservableDict()
fields_desc = [
XShortEnumField('dataIdentifier', 0, dataIdentifiers),
ByteField('controlOptionRecord', 0),
StrField('controlEnableMaskRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_IOCBI, service=0x2F)
class UDS_IOCBIPR(Packet):
name = 'InputOutputControlByIdentifierPositiveResponse'
fields_desc = [
XShortField('dataIdentifier', 0),
StrField('controlStatusRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_IOCBI) \
and other.dataIdentifier == self.dataIdentifier
bind_layers(UDS, UDS_IOCBIPR, service=0x6F)
# #########################NR###################################
class UDS_NR(Packet):
negativeResponseCodes = {
0x00: 'positiveResponse',
0x10: 'generalReject',
0x11: 'serviceNotSupported',
0x12: 'subFunctionNotSupported',
0x13: 'incorrectMessageLengthOrInvalidFormat',
0x14: 'responseTooLong',
0x20: 'ISOSAEReserved',
0x21: 'busyRepeatRequest',
0x22: 'conditionsNotCorrect',
0x23: 'ISOSAEReserved',
0x24: 'requestSequenceError',
0x25: 'noResponseFromSubnetComponent',
0x26: 'failurePreventsExecutionOfRequestedAction',
0x31: 'requestOutOfRange',
0x33: 'securityAccessDenied',
0x35: 'invalidKey',
0x36: 'exceedNumberOfAttempts',
0x37: 'requiredTimeDelayNotExpired',
0x70: 'uploadDownloadNotAccepted',
0x71: 'transferDataSuspended',
0x72: 'generalProgrammingFailure',
0x73: 'wrongBlockSequenceCounter',
0x78: 'requestCorrectlyReceived-ResponsePending',
0x7E: 'subFunctionNotSupportedInActiveSession',
0x7F: 'serviceNotSupportedInActiveSession',
0x80: 'ISOSAEReserved',
0x81: 'rpmTooHigh',
0x82: 'rpmTooLow',
0x83: 'engineIsRunning',
0x84: 'engineIsNotRunning',
0x85: 'engineRunTimeTooLow',
0x86: 'temperatureTooHigh',
0x87: 'temperatureTooLow',
0x88: 'vehicleSpeedTooHigh',
0x89: 'vehicleSpeedTooLow',
0x8a: 'throttle/PedalTooHigh',
0x8b: 'throttle/PedalTooLow',
0x8c: 'transmissionRangeNotInNeutral',
0x8d: 'transmissionRangeNotInGear',
0x8e: 'ISOSAEReserved',
0x8f: 'brakeSwitch(es)NotClosed',
0x90: 'shifterLeverNotInPark',
0x91: 'torqueConverterClutchLocked',
0x92: 'voltageTooHigh',
0x93: 'voltageTooLow',
}
name = 'NegativeResponse'
fields_desc = [
XByteEnumField('requestServiceId', 0, UDS.services),
ByteEnumField('negativeResponseCode', 0, negativeResponseCodes)
]
def answers(self, other):
return self.requestServiceId == other.service and \
(self.negativeResponseCode != 0x78 or
conf.contribs['UDS']['treat-response-pending-as-answer'])
bind_layers(UDS, UDS_NR, service=0x7f)
# ##################################################################
# ######################## UTILS ###################################
# ##################################################################
class UDS_TesterPresentSender(PeriodicSenderThread):
def __init__(self, sock, pkt=UDS() / UDS_TP(), interval=2):
""" Thread to send TesterPresent messages packets periodically
Args:
sock: socket where packet is sent periodically
pkt: packet to send
interval: interval between two packets
"""
PeriodicSenderThread.__init__(self, sock, pkt, interval)
def run(self):
# type: () -> None
while not self._stopped.is_set():
for p in self._pkts:
self._socket.sr1(p, timeout=0.3, verbose=False)
time.sleep(self._interval)
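# Hedged usage sketch (not part of the original module): keep a diagnostic
# session alive while other requests are exchanged; `sock` is assumed to be an
# already-open ISOTP socket.
#     tps = UDS_TesterPresentSender(sock)
#     tps.start()    # sends UDS()/UDS_TP() every 2 seconds
#     ...            # perform diagnostic work while the session stays active
#     tps.stop()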
def UDS_SessionEnumerator(sock, session_range=range(0x100), reset_wait=1.5):
""" Enumerates session ID's in given range
and returns list of UDS()/UDS_DSC() packets
with valid session types
Args:
sock: socket where packets are sent
session_range: range for session ID's
reset_wait: wait time in sec after every packet
"""
pkts = (req for tup in
product(UDS() / UDS_DSC(diagnosticSessionType=session_range),
UDS() / UDS_ER(resetType='hardReset')) for req in tup)
results, _ = sock.sr(pkts, timeout=len(session_range) * reset_wait * 2 + 1,
verbose=False, inter=reset_wait)
return [req for req, res in results if req is not None and
req.service != 0x11 and
(res.service == 0x50 or
res.negativeResponseCode not in [0x10, 0x11, 0x12])]
def UDS_ServiceEnumerator(sock, session="DefaultSession",
filter_responses=True):
""" Enumerates every service ID
and returns list of tuples. Each tuple contains
the session and the respective positive response
Args:
sock: socket where packet is sent periodically
session: session in which the services are enumerated
"""
pkts = (UDS(service=x) for x in set(x & ~0x40 for x in range(0x100)))
found_services = sock.sr(pkts, timeout=5, verbose=False)
return [(session, p) for _, p in found_services[0] if
p.service != 0x7f or
(p.negativeResponseCode not in [0x10, 0x11] or not
filter_responses)]
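# Hedged usage sketch (not part of the original module): combine the two
# enumerators above; `sock` is assumed to be an ISOTP socket bound to the
# target ECU, and the session label passed through is illustrative only.
def _example_scan(sock):
    found = []
    for dsc in UDS_SessionEnumerator(sock, session_range=range(0x20)):
        sock.sr1(dsc, timeout=1, verbose=False)  # switch into the discovered session
        found += UDS_ServiceEnumerator(sock, session=dsc.diagnosticSessionType)
    return found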
def getTableEntry(tup):
""" Helping function for make_lined_table.
Returns the session and response code of tup.
Args:
tup: tuple with session and UDS response package
Example:
make_lined_table([('DefaultSession', UDS()/UDS_SAPR(),
'ExtendedDiagnosticSession', UDS()/UDS_IOCBI())],
getTableEntry)
"""
session, pkt = tup
if pkt.service == 0x7f:
return (session,
"0x%02x: %s" % (pkt.requestServiceId,
pkt.sprintf("%UDS_NR.requestServiceId%")),
pkt.sprintf("%UDS_NR.negativeResponseCode%"))
else:
return (session,
"0x%02x: %s" % (pkt.service & ~0x40,
pkt.get_field('service').
i2s[pkt.service & ~0x40]),
"PositiveResponse")
|
4shadoww/usploit
|
lib/scapy/contrib/automotive/uds.py
|
Python
|
mit
| 44,266 | 0 |
"""
Defines common types and type related utilities: Singleton, etc.
These types can be shared by other utils modules and imported into util main namespace for use by other pymel modules
"""
import inspect, types, operator, sys, warnings
class Singleton(type):
""" Metaclass for Singleton classes.
>>> class DictSingleton(dict) :
... __metaclass__ = Singleton
...
>>> DictSingleton({'A':1})
{'A': 1}
>>> a = DictSingleton()
>>> a
{'A': 1}
>>> b = DictSingleton({'B':2})
>>> a, b, DictSingleton()
({'B': 2}, {'B': 2}, {'B': 2})
>>> a is b and a is DictSingleton()
True
>>> class StringSingleton(str) :
... __metaclass__ = Singleton
...
>>> StringSingleton("first")
'first'
>>> a = StringSingleton()
>>> a
'first'
>>> b = StringSingleton("changed")
>>> a, b, StringSingleton()
('first', 'first', 'first')
>>> a is b and a is StringSingleton()
True
>>> class DictSingleton2(DictSingleton):
... pass
...
>>> DictSingleton2({'A':1})
{'A': 1}
>>> a = DictSingleton2()
>>> a
{'A': 1}
>>> b = DictSingleton2({'B':2})
>>> a, b, DictSingleton2()
({'B': 2}, {'B': 2}, {'B': 2})
>>> a is b and a is DictSingleton2()
True
"""
def __new__(mcl, classname, bases, classdict):
# newcls = super(Singleton, mcl).__new__(mcl, classname, bases, classdict)
# redefine __new__
def __new__(cls, *p, **k):
if '_the_instance' not in cls.__dict__:
cls._the_instance = super(newcls, cls).__new__(cls, *p, **k)
return cls._the_instance
newdict = { '__new__': __new__}
# define __init__ if it has not been defined in the class being created
def __init__(self, *p, **k):
cls = self.__class__
if p :
if hasattr(self, 'clear') :
self.clear()
else :
super(newcls, self).__init__()
super(newcls, self).__init__(*p, **k)
if '__init__' not in classdict :
newdict['__init__'] = __init__
# Note: could have defined the __new__ method like it is done in Singleton but it's as easy to derive from it
for k in classdict :
if k in newdict :
warnings.warn("Attribute %r is predefined in class %r of type %r and can't be overriden" % (k, classname, mcl.__name__))
else :
newdict[k] = classdict[k]
newcls = super(Singleton, mcl).__new__(mcl, classname, bases, newdict)
return newcls
class metaStatic(Singleton) :
""" A static (immutable) Singleton metaclass to quickly build classes
holding predefined immutable dicts
>>> class FrozenDictSingleton(dict) :
... __metaclass__ = metaStatic
...
>>> FrozenDictSingleton({'A':1})
{'A': 1}
>>> a = FrozenDictSingleton()
>>> a
{'A': 1}
>>> b = FrozenDictSingleton()
>>> a, b
({'A': 1}, {'A': 1})
>>> a is b
True
>>> b = FrozenDictSingleton({'B':2})
Traceback (most recent call last):
...
TypeError: 'FrozenDictSingleton' object does not support redefinition
>>> a['A']
1
>>> a['A'] = 2 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: '<class '...FrozenDictSingleton'>' object does not support item assignation
>>> a.clear()
Traceback (most recent call last):
...
AttributeError: 'FrozenDictSingleton' object has no attribute 'clear'
>>> a, b, FrozenDictSingleton()
({'A': 1}, {'A': 1}, {'A': 1})
>>> a is b and a is FrozenDictSingleton()
True
>>> class StaticTest(FrozenDictSingleton):
... pass
...
>>> StaticTest({'A': 1})
{'A': 1}
>>> a = StaticTest()
>>> a
{'A': 1}
>>> b = StaticTest()
>>> a, b
({'A': 1}, {'A': 1})
>>> class StaticTest2( StaticTest ):
... pass
...
>>> StaticTest2({'B': 2})
{'B': 2}
>>> a = StaticTest2()
>>> a
{'B': 2}
>>> b = StaticTest2()
>>> a, b
({'B': 2}, {'B': 2})
"""
def __new__(mcl, classname, bases, classdict):
"""
"""
# redefine __init__
def __init__(self, *p, **k):
cls = self.__class__
# Can only create once)
if p :
# Can only init once
if not self:
return super(newcls, self).__init__(*p, **k)
else :
raise TypeError, "'"+classname+"' object does not support redefinition"
newdict = { '__init__':__init__}
        # hide methods which might inherit from a mutable base
def __getattribute__(self, name):
if name in newcls._hide :
raise AttributeError, "'"+classname+"' object has no attribute '"+name+"'"
else :
return super(newcls, self).__getattribute__(name)
newdict['__getattribute__'] = __getattribute__
_hide = ('clear', 'update', 'pop', 'popitem', '__setitem__', '__delitem__', 'append', 'extend' )
newdict['_hide'] = _hide
# prevent item assignation or deletion
def __setitem__(self, key, value) :
raise TypeError, "'%s' object does not support item assignation" % (self.__class__)
newdict['__setitem__'] = __setitem__
def __delitem__(self, key):
raise TypeError, "'%s' object does not support item deletion" % (self.__class__)
newdict['__delitem__'] = __delitem__
# Now add methods of the defined class, as long as it doesn't try to redefine
# Note: could have defined the __new__ method like it is done in Singleton but it's as easy to derive from it
for k in classdict :
if k in newdict :
warnings.warn("Attribute %r is predefined in class %r of type %r and can't be overriden" % (k, classname, mcl.__name__))
else :
newdict[k] = classdict[k]
newcls = super(metaStatic, mcl).__new__(mcl, classname, bases, newdict)
return newcls
try:
from collections import defaultdict
except:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.iteritems()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
class defaultlist(list):
def __init__(self, default_factory, *args, **kwargs ):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
list.__init__(self,*args, **kwargs)
self.default_factory = default_factory
def __setitem__( self, index, item ):
try:
list.__setitem__(self, index, item)
except IndexError:
            # pad missing slots with defaults so the item lands at the requested index
            diff = index - len(self)
            assert diff >= 0
            self.extend( [self.default_factory() ] * diff + [item] )
def __getitem__(self, index):
try:
return list.__getitem__(self, index)
except IndexError:
return self.default_factory()
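# Illustrative usage sketch (not part of the original pymel module): shows the
# defaultlist padding behaviour; the helper name and the sample values below
# are invented for this example.
def _example_defaultlist_usage():
    dl = defaultlist(int, [1, 2])
    beyond = dl[10]            # returns int() == 0, the list itself is untouched
    dl[4] = 9                  # pads indices 2 and 3 with int() == 0
    return beyond, list(dl)    # (0, [1, 2, 0, 0, 9])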
class ModuleInterceptor(object):
"""
    This class is used to intercept an unset attribute of a module to perform a callback. The
callback will only be performed if the attribute does not exist on the module. Any error raised
in the callback will cause the original AttributeError to be raised.
def cb( module, attr):
if attr == 'this':
print "intercepted"
else:
raise ValueError
import sys
sys.modules[__name__] = ModuleInterceptor(__name__, cb)
        # accessing an attribute that is not defined on the module, e.g.
        # module.this, now triggers the callback and prints:
        intercepted
The class does not work when imported into the main namespace.
"""
def __init__(self, moduleName, callback):
self.module = __import__( moduleName , globals(), locals(), [''] )
self.callback = callback
def __getattr__(self, attr):
try:
return getattr(self.module, attr)
except AttributeError, msg:
try:
self.callback( self.module, attr)
except:
raise AttributeError, msg
# read only decorator
def readonly(f) :
""" Marks a class member as protected, allowing metaProtected to prevent re-assignation on the classes it generates """
f.__readonly__ = None
return f
class metaReadOnlyAttr(type) :
""" A metaclass to allow to define read-only class attributes, accessible either on the class or it's instances
and protected against re-write or re-definition.
Read only attributes are stored in the class '__readonly__' dictionary.
Any attribute can be marked as read only by including its name in a tuple named '__readonly__' in the class
definition. Alternatively methods can be marked as read only with the @readonly decorator and will then get
added to the dictionary at class creation """
def __setattr__(cls, name, value):
""" overload __setattr__ to forbid modification of read only class info """
readonly = {}
for c in inspect.getmro(cls) :
if hasattr(c, '__readonly__') :
readonly.update(c.__readonly__)
if name in readonly :
raise AttributeError, "attribute %s is a read only class attribute and cannot be modified on class %s" % (name, cls.__name__)
else :
super(metaReadOnlyAttr, cls).__setattr__(name, value)
def __new__(mcl, classname, bases, classdict):
""" Create a new metaReadOnlyAttr class """
# checks for protected members, in base classes on in class to be created
readonly = {}
# check for protected members in class definition
if '__readonly__' in classdict :
readonly.update(dict((a, None) for a in classdict['__readonly__']))
for a in classdict :
if hasattr(classdict[a], '__readonly__') :
readonly[a] = None
readonly['__readonly__'] = None
classdict['__readonly__'] = readonly
# the use of __slots__ protects instance attributes
# slots = []
# if '__slots__' in classdict :
# slots = list(classdict['__slots__'])
# create the new class
newcls = super(metaReadOnlyAttr, mcl).__new__(mcl, classname, bases, classdict)
# if hasattr(newcls, '__slots__') :
# for s in newcls.__slots__ :
# if s not in slots :
# slots.append(s)
# type.__setattr__(newcls, '__slots__', slots)
# unneeded through the use of __slots__
# def __setattr__(self, name, value):
# """ overload __setattr__ to forbid overloading of read only class info on a class instance """
# try :
# readonly = newcls.__readonly__
# except :
# readonly = {}
# if name in readonly :
# raise AttributeError, "attribute '%s' is a read only class attribute of class %s and cannot be overloaded on an instance of class %s" % (name, self.__class__.__name__, self.__class__.__name__)
# else :
# super(newcls, self).__setattr__(name, value)
#
# type.__setattr__(newcls, '__setattr__', __setattr__)
return newcls
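# Illustrative usage sketch (not part of the original pymel module): shows how
# the @readonly decorator and a __readonly__ tuple cooperate with
# metaReadOnlyAttr.  The class 'Frozen' and its attributes are invented here.
def _example_readonly_usage():
    class Frozen(object):
        __metaclass__ = metaReadOnlyAttr
        __readonly__ = ('kind',)
        kind = 'frozen'
        @readonly
        def describe(self):
            return 'a %s thing' % self.kind
    try:
        Frozen.kind = 'thawed'           # re-assigning a read only class attribute
    except AttributeError:
        pass                             # ...raises AttributeError, as intended
    return Frozen().describe()           # instance methods still work normally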
NOT_PROXY_WRAPPED = ['__new__', '__getattribute__', '__getattr__', '__setattr__',
'__class__', '__weakref__', '__subclasshook__',
'__reduce_ex__', '__reduce__', '__dict__', '__sizeof__',
'__module__', '__init__', '__doc__']
def proxyClass( cls, classname, dataAttrName = None, dataFuncName=None,
remove=(), makeDefaultInit = False, sourceIsImmutable=True ):
"""
This function will generate a proxy class which keeps the internal data separate from the wrapped class. This
    is useful for emulating immutable types such as str and tuple, while using mutable data. Be aware that changing the data
    will break hashing; there is no perfect solution for this, but a good approach is to subclass your proxy and implement
    a valid __hash__ method.
:Parameters:
cls : `type`
The class to wrap
classname : `string`
The name to give the resulting proxy class
dataAttrName : `string`
The name of an attribute on which an instance of the wrapped class will
be stored.
            Either dataAttrName or dataFuncName must be given, but not both.
dataFuncName : `string`
The name of an attribute on which reside a function, which takes no
arguments, and when called, will return an instance of the wrapped
class.
            Either dataAttrName or dataFuncName must be given, but not both.
remove : `string` iterable
An iterable of name of attributes which should NOT be wrapped.
Note that certain attributes will never be wrapped - the list of
such items is found in the NOT_PROXY_WRAPPED constant.
makeDefaultInit : `bool`
If True and dataAttrName is True, then a 'default' __init__ function
will be created, which creates an instance of the wrapped class, and
assigns it to the dataAttr. Defaults to False
If dataAttrName is False, does nothing
sourceIsImmutable : `bool`
This parameter is included only for backwards compatibility - it is
ignored.
:rtype: `type`
"""
assert not ( dataAttrName and dataFuncName ), 'Cannot use attribute and function for data storage. Choose one or the other.'
if dataAttrName:
class ProxyAttribute(object):
def __init__(self, name):
self.name = name
def __get__(self, proxyInst, proxyClass):
if proxyInst is None:
return getattr(cls, self.name)
else:
return getattr(getattr(proxyInst, dataAttrName),
self.name)
def _methodWrapper( method ):
def wrapper(self, *args, **kwargs):
return method( getattr(self, dataAttrName), *args, **kwargs )
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
elif dataFuncName:
class ProxyAttribute(object):
def __init__(self, name):
self.name = name
def __get__(self, proxyInst, proxyClass):
if proxyInst is None:
return getattr(cls, self.name)
else:
return getattr(getattr(proxyInst, dataFuncName)(),
self.name)
def _methodWrapper( method ):
#print method
#@functools.wraps(f)
def wrapper(self, *args, **kwargs):
return method( getattr(self, dataFuncName)(), *args, **kwargs )
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
else:
raise TypeError, 'Must specify either a dataAttrName or a dataFuncName'
class Proxy(object):
# make a default __init__ which sets the dataAttr...
# if __init__ is in remove, or dataFuncName given,
# user must supply own __init__, and set the dataAttr/dataFunc
# themselves
if makeDefaultInit and dataAttrName:
def __init__(self, *args, **kwargs):
# We may wrap __setattr__, so don't use 'our' __setattr__!
object.__setattr__(self, dataAttrName, cls(*args, **kwargs))
# For 'type' objects, you can't set the __doc__ outside of
# the class definition, so do it here:
if '__doc__' not in remove:
__doc__ = cls.__doc__
remove = set(remove)
remove.update(NOT_PROXY_WRAPPED)
#remove = [ '__init__', '__getattribute__', '__getattr__'] + remove
for attrName, attrValue in inspect.getmembers(cls):
if attrName not in remove:
# We wrap methods using _methodWrapper, because if someone does
# unboundMethod = MyProxyClass.method
# ...they should be able to call unboundMethod with an instance
# of MyProxyClass as they expect (as opposed to an instance of
# the wrapped class, which is what you would need to do if
# we used ProxyAttribute)
# ...the stuff with the cls.__dict__ is just to check
# we don't have a classmethod - since it's a data descriptor,
# we have to go through the class dict...
if ((inspect.ismethoddescriptor(attrValue) or
inspect.ismethod(attrValue)) and
not isinstance(cls.__dict__.get(attrName, None),
(classmethod, staticmethod))):
try:
setattr( Proxy, attrName, _methodWrapper(attrValue) )
except AttributeError:
print "proxyClass: error adding proxy method %s.%s" % (classname, attrName)
else:
try:
setattr( Proxy, attrName, ProxyAttribute(attrName) )
except AttributeError:
print "proxyClass: error adding proxy attribute %s.%s" % (classname, attrName)
Proxy.__name__ = classname
return Proxy
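# Illustrative usage sketch (not part of the original pymel module): builds a
# proxy around float whose data lives on a separate attribute.  The names
# 'CachedFloat' and '_data' are invented for this example.
def _example_proxyClass_usage():
    CachedFloat = proxyClass(float, 'CachedFloat', dataAttrName='_data',
                             makeDefaultInit=True)
    c = CachedFloat(2.5)
    # wrapped methods are forwarded to the float stored on c._data
    return c + 0.5, c.is_integer()       # (3.0, False)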
# Note - for backwards compatibility reasons, PyNodes still inherit from
# ProxyUnicode, even though we are now discouraging their use 'like strings',
# and ProxyUnicode itself has now had so many methods removed from it that
# it's no longer really a good proxy for unicode.
# NOTE: This may move back to core.general, depending on whether the __getitem__ bug was fixed in 2009, since we'll have to do a version switch there
#ProxyUnicode = proxyClass( unicode, 'ProxyUnicode', dataFuncName='name', remove=['__getitem__', 'translate']) # 2009 Beta 2.1 has issues with passing classes with __getitem__
ProxyUnicode = proxyClass( unicode, 'ProxyUnicode', dataFuncName='name',
remove=[ '__doc__', '__getslice__', '__contains__', '__len__',
'__mod__', '__rmod__', '__mul__', '__rmod__', '__rmul__', # reserved for higher levels
'expandtabs', 'translate', 'decode', 'encode', 'splitlines',
'capitalize', 'swapcase', 'title',
'isalnum', 'isalpha', 'isdigit', 'isspace', 'istitle',
'zfill' ])
class universalmethod(object):
# """
# a decorator which is similar to builtin classmethod, but which leaves the method unmodified when called
# as a normal instance method:
# - when the wrapped method is called as a class method, the first argument will be the class.
# - when the wrapped method is called as an instance method, the first argument will be the instance.
#
# >>> import inspect
# >>> class D(object):
# ... @universalmethod
# ... def f( obj ):
# ... if inspect.isclass(obj):
# ... print "doing something class related"
# ... else:
# ... print "doing something instance related"
# ...
# >>> D.f()
# doing something class related
# >>> d = D()
# >>> d.f()
# doing something instance related
#
# """
def __init__(self, f):
self.f = f
def __get__(self, instance, cls=None):
if cls is None:
cls = type(instance)
if instance is None:
instance = cls
def newfunc(*args, **kwargs):
return self.f(instance, *args, **kwargs)
return newfunc
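# Illustrative usage sketch (not part of the original pymel module): the wrapped
# function receives the class when called on the class and the instance when
# called on an instance.  The class 'Node' is invented for this example.
def _example_universalmethod_usage():
    class Node(object):
        @universalmethod
        def describe(obj):
            return 'class' if inspect.isclass(obj) else 'instance'
    return Node.describe(), Node().describe()    # ('class', 'instance')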
def LazyLoadModule(name, contents):
"""
:param name: name of the module
:param contents: dictionary of initial module globals
    This function returns a special module type with one method `_lazyModule_addAttr`. The signature
    of this method is:
        _lazyModule_addAttr(name, creator, *creatorArgs, **creatorKwargs)
Attributes added with this method will not be created until the first time that
they are accessed, at which point a callback function will be called to generate
the attribute's value.
:param name: name of the attribute to lazily add
    :param creator: a function that creates the attribute's value the first time it is accessed
Example::
import sys
mod = LazyLoadModule(__name__, globals())
        mod._lazyModule_addAttr( 'foo', str, 'bar' )
sys.modules[__name__] = mod
One caveat of this technique is that if a user imports everything from your
    lazy module ( e.g. from module import * ), it will cause all lazy attributes
to be evaluated.
Also, if any module-level expression needs to reference something that only
exists in the LazyLoadModule, it will need to be stuck in after the creation of the
LazyLoadModule. Then, typically, after defining all functions/classes/etc
which rely on the LazyLoadModule attributes, you will wish to update the
LazyLoadModule with the newly-created functions - typically, this is done
    with the _lazyModule_update method.
Finally, any functions which reference any LazyLoadModule-only attributes,
whether they are defined after OR before the creation of the LazyLoadModule,
will have to prefix it with a reference to the LazyLoadModule.
Example::
import sys
def myFunc():
# need to preface foo with 'lazyModule',
# even though this function is defined before
# the creation of the lazy module!
print 'foo is:', lazyModule.foo
        mod = LazyLoadModule(__name__, globals())
        mod._lazyModule_addAttr( 'foo', str, 'bar' )
sys.modules[__name__] = mod
# create a reference to the LazyLoadModule in this module's
# global space
lazyModule = sys.modules[__name__]
# define something which relies on something in the lazy module
fooExpanded = lazyModule.foo + '... now with MORE!'
# update the lazyModule with our new additions (ie, fooExpanded)
lazyModule._updateLazyModule(globals())
"""
class _LazyLoadModule(types.ModuleType):
class LazyLoader(object):
"""
A data descriptor that delays instantiation of an object
until it is first accessed.
"""
def __init__(self, name, creator, *creatorArgs, **creatorKwargs):
self.creator = creator
self.args = creatorArgs
self.kwargs = creatorKwargs
self.name = name
def __get__(self, obj, objtype):
# In case the LazyLoader happens to get stored on more
# than one object, cache the created object so the exact
# same one will be returned
if not hasattr(self, 'newobj'):
# use the callback to create the object that will replace us
self.newobj = self.creator(*self.args, **self.kwargs)
if isinstance(obj, types.ModuleType) and hasattr(self.newobj, '__module__'):
self.newobj.__module__ = obj.__name__
#print "Lazy-loaded object:", self.name
#delattr( obj.__class__, self.name) # should we overwrite with None?
# overwrite ourselves with the newly created object
setattr( obj, self.name, self.newobj)
return self.newobj
def __init__(self, name, contents):
types.ModuleType.__init__(self, name)
self.__dict__.update(contents)
self._lazyGlobals = contents # globals of original module
# add ourselves to sys.modules, overwriting the original module
sys.modules[name] = self
# the above line assigns a None value to all entries in the original globals.
# luckily, we have a copy on this module we can use to restore it.
self._lazyGlobals.update( self.__dict__ )
@property
def __all__(self):
public = [ x for x in self.__dict__.keys() + self.__class__.__dict__.keys() if not x.startswith('_') ]
return public
@classmethod
def _lazyModule_addAttr(cls, name, creator, *creatorArgs, **creatorKwargs):
lazyObj = cls.LazyLoader(name, creator, *creatorArgs, **creatorKwargs)
setattr( cls, name, lazyObj )
return lazyObj
def __setitem__(self, attr, args):
"""
dynModule['attrName'] = ( callbackFunc, ( 'arg1', ), {} )
"""
# args will either be a single callable, or will be a tuple of
# ( callable, (args,), {kwargs} )
if hasattr( args, '__call__'):
callback = args
elif isinstance( args, (tuple, list) ):
if len(args) >= 1:
assert hasattr( args[0], '__call__' ), 'first argument must be callable'
callback = args[0]
else:
raise ValueError, "must supply at least one argument"
if len(args) >= 2:
assert hasattr( args[1], '__iter__'), 'second argument must be iterable'
cb_args = args[1]
else:
cb_args = ()
cb_kwargs = {}
if len(args) == 3:
assert operator.isMappingType(args[2]), 'third argument must be a mapping type'
cb_kwargs = args[2]
else:
cb_kwargs = {}
if len(args) > 3:
raise ValueError, "if args and kwargs are desired, they should be passed as a tuple and dictionary, respectively"
else:
raise ValueError, "the item must be set to a callable, or to a 3-tuple of (callable, (args,), {kwargs})"
self._lazyModule_addAttr(attr, callback, *cb_args, **cb_kwargs)
def __getitem__(self, attr):
"""
return a LazyLoader without initializing it, or, if a LazyLoader does not exist with this name,
a real object
"""
try:
return self.__class__.__dict__[attr]
except KeyError:
return self.__dict__[attr]
# Sort of a cumbersome name, but we want to make sure it doesn't conflict with any
# 'real' entries in the module
def _lazyModule_update(self):
"""
Used to update the contents of the LazyLoadModule with the contents of another dict.
"""
# For debugging, print out a list of things in the _lazyGlobals that
# AREN'T in __dict__
# print "_lazyModule_update:"
# print "only in dynamic module:", [x for x in
# (set(self.__class__.__dict__) | set(self.__dict__))- set(self._lazyGlobals)
# if not x.startswith('__')]
self.__dict__.update(self._lazyGlobals)
return _LazyLoadModule(name, contents)
# Note - since anything referencing attributes that only exist on the lazy module
# must be prefaced with a ref to the lazy module, if we are converting a pre-existing
# module to include LazyLoaded objects, we must manually go through and edit
# any references to those objects to have a 'lazyModule' prefix (or similar).
# To aid in this process, I recommend:
# 1. Uncommenting out the final print statement in _lazyModule_update
# 2. Grabbing the output of the print statement, throw it into a text editor with
# regexp find/replace capabilities
# 3. You should have a python list of names.
# Replace the initial and final bracket and quote - [' and '] - with opening
# and closing parentheses - ( and )
# Then find / replace all occurrences of:
# ', '
# with:
# |
# ...and you should be left with a regular expression you can use to find and replace
# in your original code...
# (you may also want to put (?<=\W) / (?=\W) in front / behind the regexp...)
# Don't do the regexp find / replace on the source code blindly, though!
# ...also, when you make the call to _updateLazyModule that prints out the list of
# dynamic-module-only attributes, you should do it from a GUI maya - there are some objects
# that only exist in GUI-mode...
class LazyDocStringError(Exception): pass
class LazyDocString(types.StringType):
"""
Set the __doc__ of an object to an object of this class in order to have
a docstring that is dynamically generated when used.
Due to restrictions of inheriting from StringType (which is necessary,
as the 'help' function does a check to see if __doc__ is a string),
the creator can only take a single object.
Since the object initialization requires multiple parameters, the
    LazyDocString should be fed a sliceable iterable on creation,
of the following form:
LazyDocString( [documentedObj, docGetter, arg1, arg2, ...] )
documentedObj should be the object on which we are placing the docstring
docGetter should be a function which is used to retrieve the 'real'
        docstring - its args will be documentedObj and any extra args
passed to the object on creation.
Example Usage:
>>> def getDocStringFromDict(obj):
... returnVal = docStringDict[obj]
... return returnVal
>>>
>>> # In order to alter the doc of a class, we need to use a metaclass
>>> class TestMetaClass(type): pass
>>>
>>> class TestClass(object):
... __metaclass__ = TestMetaClass
...
... def aMethod(self):
... pass
...
... aMethod.__doc__ = LazyDocString( (aMethod, getDocStringFromDict, (aMethod,)) )
>>>
>>> TestClass.__doc__ = LazyDocString( (TestClass, getDocStringFromDict, (TestClass,)) )
>>>
>>>
>>> docStringDict = {TestClass:'New Docs for PynodeClass!',
... TestClass.aMethod.im_func:'Method docs!'}
>>>
>>> TestClass.__doc__
'New Docs for PynodeClass!'
>>> TestClass.aMethod.__doc__
'Method docs!'
Note that new-style classes (ie, instances of 'type') and instancemethods
can't have their __doc__ altered.
In the case of classes, you can get around this by using a metaclass for
the class whose docstring you wish to alter.
In the case of instancemethods, just set the __doc__ on the function
underlying the method (ie, myMethod.im_func). Note that if the __doc__
for the method is set within the class definition itself, you will
already automatically be modifying the underlying function.
"""
def __init__(self, argList):
if len(argList) < 2:
raise LazyDocStringError('LazyDocString must be initialized with an iterable of the form: LazyDocString( [documentedObj, docGetter, arg1, arg2, ...] )')
documentedObj = argList[0]
docGetter = argList[1]
if len(argList) > 2:
args = argList[2]
if len(argList) == 4:
kwargs = argList[3]
else:
kwargs = {}
else:
args = ()
kwargs = {}
try:
# put in a placeholder docstring, and check to make
# sure we can change the __doc__ of this object!
documentedObj.__doc__ = 'LazyDocString placeholder'
except AttributeError:
raise LazyDocStringError('cannot modify the docstring of %r objects' % documentedObj.__class__.__name__)
self.documentedObj = documentedObj
self.docGetter = docGetter
self.args = args
self.kwargs = kwargs
def __str__(self):
#print "creating docstrings", self.docGetter, self.args, self.kwargs
self.documentedObj.__doc__ = self.docGetter(*self.args, **self.kwargs)
return self.documentedObj.__doc__
def __repr__(self):
return repr(str(self))
for _name, _method in inspect.getmembers(types.StringType, inspect.isroutine):
if _name.startswith('_'):
continue
def makeMethod(name):
def LazyDocStringMethodWrapper(self, *args, **kwargs):
return getattr(str(self), name)(*args, **kwargs)
return LazyDocStringMethodWrapper
setattr(LazyDocString, _name, makeMethod(_name) )
def addLazyDocString( object, creator, *creatorArgs, **creatorKwargs):
"""helper for LazyDocString. Equivalent to :
object.__doc__ = LazyDocString( (object, creator, creatorArgs, creatorKwargs) )
"""
object.__doc__ = LazyDocString( (object, creator, creatorArgs, creatorKwargs) )
class TwoWayDict(dict):
"""
A dictionary that can also map in reverse: value to key.
>>> twd = TwoWayDict( {3:'foobar'} )
>>> twd[3]
'foobar'
>>> twd.get_key('foobar')
3
Entries in both sets (keys and values) must be unique within that set, but
not necessarily across the two sets - ie, you may have 12 as both a key and
a value, but you may not have two keys which both map to 12 (or, as with a
regular dict, two key entries for 12).
If a key is updated to a new value, get_key for the old value will raise
a KeyError:
>>> twd = TwoWayDict( {3:'old'} )
>>> twd[3] = 'new'
>>> twd[3]
'new'
>>> twd.get_key('new')
3
>>> twd.get_key('old')
Traceback (most recent call last):
...
KeyError: 'old'
Similarly, if a key is updated to an already-existing value, then the old key
will be removed from the dictionary!
>>> twd = TwoWayDict( {'oldKey':'aValue'} )
>>> twd['newKey'] = 'aValue'
>>> twd['newKey']
'aValue'
>>> twd.get_key('aValue')
'newKey'
>>> twd['oldKey']
Traceback (most recent call last):
...
KeyError: 'oldKey'
If a group of values is fed to the TwoWayDict (either on initialization, or
through 'update', etc) that is not consistent with these conditions, then the
resulting dictionary is indeterminate; however, it is guaranteed to be a valid/
uncorrupted TwoWayDict.
(This is similar to how dict will allow, for instance, {1:'foo', 1:'bar'}).
>>> twd = TwoWayDict( {1:'foo', 1:'bar'} )
>>> # Is twd[1] 'foo' or 'bar'?? Nobody knows!
>>> # ...however, one of these is guaranteed to raise an error...
>>> twd.get_key('foo') + twd.get_key('bar') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
KeyError: (either 'bar' or 'foo')
>>> twd = TwoWayDict( {1:'foo', 2:'foo'} )
>>> # Is twd.get_key('foo') 1 or 2? Nobody knows!
>>> # ...however, one of these is guaranteed to raise an error...
>>> twd[1] + twd[2] #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
KeyError: (either 1 or 2)
    Obviously, such shenanigans should be avoided - at some point in the future, this may
cause an error to be raised...
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
self._reverse = {}
self.update(*args, **kwargs)
def __setitem__(self, k, v):
# Maintain the 1-1 mapping
if dict.__contains__(self, k):
del self._reverse[self[k]]
if v in self._reverse:
dict.__delitem__(self, self.get_key(v))
dict.__setitem__(self, k, v)
self._reverse[v] = k
def has_value(self, v):
return self._reverse.has_key(v)
def __delitem__(self, k):
del self._reverse[self[k]]
dict.__delitem__(self, k)
def clear(self):
self._reverse.clear()
dict.clear(self)
def copy(self):
return TwoWayDict(self)
def pop(self, k):
del self._reverse[self[k]]
        return dict.pop(self, k)
def popitem(self, **kws):
raise NotImplementedError()
def setdefault(self, **kws):
raise NotImplementedError()
def update(self, *args, **kwargs):
if not (args or kwargs):
return
if len(args) > 1:
raise TypeError('update expected at most 1 arguments, got %d' % len(args))
# since args may be a couple different things, cast it to a dict to
# simplify things...
if args:
tempDict = dict(args[0])
else:
tempDict = {}
tempDict.update(kwargs)
for key, val in tempDict.iteritems():
self[key] = val
def get_key(self, v):
return self._reverse[v]
class EquivalencePairs(TwoWayDict):
"""
A mapping object similar to a TwoWayDict, with the addition that indexing
and '__contains__' can now be used with keys OR values:
>>> eq = EquivalencePairs( {3:'foobar'} )
>>> eq[3]
'foobar'
>>> eq['foobar']
3
>>> 3 in eq
True
>>> 'foobar' in eq
True
This is intended to be used where there is a clear distinction between
keys and values, so there is little likelihood of the sets of keys
and values intersecting.
The dictionary has the same restrictions as a TwoWayDict, with the added restriction
that an object must NOT appear in both the keys and values, unless it maps to itself.
If a new item is set that would break this restriction, the old keys/values will be
removed from the mapping to ensure these restrictions are met.
>>> eq = EquivalencePairs( {1:'a', 2:'b', 3:'die'} )
>>> eq['a']
1
>>> eq['b']
2
>>> eq[1]
'a'
>>> eq[2]
'b'
>>> del eq['die']
>>> eq[3]
Traceback (most recent call last):
...
KeyError: 3
>>> eq[2] = 1
>>> eq[1]
2
>>> eq[2]
1
>>> eq['a']
Traceback (most recent call last):
...
KeyError: 'a'
>>> eq['b']
Traceback (most recent call last):
...
KeyError: 'b'
# Even though 2 is set as a VALUE, since it already
# exists as a KEY, the 2:'b' mapping is removed,
# so eq['b'] will be invalid...
>>> eq = EquivalencePairs( {1:'a', 2:'b'} )
>>> eq['new'] = 2
>>> eq['new']
2
>>> eq[2]
'new'
>>> eq['b']
Traceback (most recent call last):
...
KeyError: 'b'
    # Similarly, if you set as a KEY something that
# already exists as a value...
>>> eq = EquivalencePairs( {1:'a', 2:'b'} )
>>> eq['b'] = 3
>>> eq['b']
3
>>> eq[3]
'b'
>>> eq[2]
Traceback (most recent call last):
...
KeyError: 2
If a group of values is fed to the EquivalencePairs (either on initialization, or
    through 'update', etc) that is not consistent with its restrictions, then the
resulting dictionary is indeterminate; however, it is guaranteed to be a valid/
uncorrupted TwoWayDict.
(This is somewhat similar to the behavior of the dict object itself, which will allow
a definition such as {1:2, 1:4} )
    Obviously, such shenanigans should be avoided - at some point in the future, this may
even cause an error to be raised...
Finally, note that a distinction between keys and values IS maintained, for compatibility
    with keys(), itervalues(), etc.
"""
def __setitem__(self, k, v):
if k in self:
# this will check if k is in the keys OR values...
del self[k]
if v in self:
del self[v]
dict.__setitem__(self, k, v)
self._reverse[v] = k
def __delitem__(self, key):
if dict.__contains__(self, key):
super(EquivalencePairs, self).__delitem__(key)
elif key in self._reverse:
dict.__delitem__(self, self[key])
del self._reverse[key]
else:
raise KeyError(key)
def __getitem__(self, key):
if dict.__contains__(self, key):
return super(EquivalencePairs, self).__getitem__(key)
elif key in self._reverse:
return self._reverse[key]
else:
raise KeyError(key)
def __contains__(self, key):
return (dict.__contains__(self, key) or
key in self._reverse)
def get(self, key, d=None):
try:
return self.__getitem__(key)
except KeyError:
return d
def alias(origAttrName):
"""
Returns a property which is simply an alias for another property.
Acts simply to provide another name to reference the same
underlying attribute; useful when subclassing, where a subclass
might have a more descriptive name for an attribute that has the
same function.
The only purpose of this function is to produce more readable code.
Example:
>>> class GenericExporter(object):
... def __init__(self, outFile):
... self.outFile = outFile
...
>>> class CowExporter(GenericExporter):
... cowFile = alias('outFile')
...
>>> CowExporter('bessie.cow').cowFile
'bessie.cow'
"""
def getter(self):
return getattr(self, origAttrName)
getter.__name__ = "get_" + origAttrName
def setter(self, value):
setattr(self, origAttrName, value)
setter.__name__ = "set_" + origAttrName
return property(getter, setter)
class propertycache(object):
'''Class for creating properties where the value is initially calculated then stored.
Intended for use as a descriptor, ie:
class MyClass(object):
@propertycache
def aValue(self):
return calcValue()
c = MyClass()
c.aValue
'''
def __init__(self, func):
self.func = func
self.name = func.__name__
def __get__(self, ownerInstance, ownerCls=None):
result = self.func(ownerInstance)
setattr(ownerInstance, self.name, result)
return result
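# Illustrative usage sketch (not part of the original pymel module): the first
# access of 'expensive' runs the decorated function, whose result then shadows
# the descriptor on the instance.  The class 'Widget' is invented here.
def _example_propertycache_usage():
    class Widget(object):
        calls = 0
        @propertycache
        def expensive(self):
            Widget.calls += 1
            return 42
    w = Widget()
    first, second = w.expensive, w.expensive
    return first, second, Widget.calls    # (42, 42, 1)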
# unit test with doctest
if __name__ == '__main__' :
import doctest
doctest.testmod()
|
CountZer0/PipelineConstructionSet
|
python/maya/site-packages/pymel-1.0.3/pymel/util/utilitytypes.py
|
Python
|
bsd-3-clause
| 43,474 | 0.006073 |
import optparse
from math import sqrt, pi
import numpy as np
from ase.dft.kpoints import monkhorst_pack
def str2dict(s, namespace={}, sep='='):
"""Convert comma-separated key=value string to dictionary.
Examples:
>>> str2dict('xc=PBE,nbands=200,parallel={band:4}')
{'xc': 'PBE', 'nbands': 200, 'parallel': {'band': 4}}
>>> str2dict('a=1.2,b=True,c=ab,d=1,2,3,e={f:42,g:cd}')
{'a': 1.2, 'c': 'ab', 'b': True, 'e': {'g': 'cd', 'f': 42}, 'd': (1, 2, 3)}
"""
dct = {}
s = (s + ',').split(sep)
for i in range(len(s) - 1):
key = s[i]
m = s[i + 1].rfind(',')
value = s[i + 1][:m]
if value[0] == '{':
assert value[-1] == '}'
value = str2dict(value[1:-1], {}, ':')
else:
try:
value = eval(value, namespace)
except (NameError, SyntaxError):
pass
dct[key] = value
s[i + 1] = s[i + 1][m + 1:]
return dct
class CalculatorFactory:
def __init__(self, Class, name, label='label',
kpts=None, kptdensity=3.0,
**kwargs):
"""Calculator factory object.
Used to create calculators with specific parameters."""
self.Class = Class
self.name = name
self.label = label
self.kpts = kpts
self.kptdensity = kptdensity
self.kwargs = kwargs
def calculate_kpts(self, atoms):
"""Estimate an appropriate number of k-points."""
if self.kpts is not None:
            # Number of k-points was explicitly set:
return self.kpts
# Use kptdensity to make a good estimate:
recipcell = atoms.get_reciprocal_cell()
kpts = []
for i in range(3):
if atoms.pbc[i]:
k = 2 * pi * sqrt((recipcell[i]**2).sum()) * self.kptdensity
kpts.append(max(1, 2 * int(round(k / 2))))
else:
kpts.append(1)
return kpts
def __call__(self, name, atoms):
"""Create calculator.
Put name in the filename of all created files."""
kpts = self.calculate_kpts(atoms)
if kpts != 'no k-points':
if self.name == 'aims': # XXX Aims uses k_grid!
self.kwargs['k_grid'] = kpts
else:
self.kwargs['kpts'] = kpts
        if self.label is not None:
            self.kwargs[self.label] = name
        return self.Class(**self.kwargs)
def add_options(self, parser):
calc = optparse.OptionGroup(parser, 'Calculator')
calc.add_option('-k', '--monkhorst-pack',
metavar='K1,K2,K3',
help='Monkhorst-Pack sampling of BZ. Example: ' +
'"4,4,4": 4x4x4 k-points, "4,4,4g": same set of ' +
'k-points shifted to include the Gamma point.')
calc.add_option('--k-point-density', type='float', default=3.0,
                        help='Density of k-points in points per reciprocal Angstrom.')
calc.add_option('-p', '--parameters', metavar='key=value,...',
help='Comma-separated key=value pairs of ' +
'calculator specific parameters.')
parser.add_option_group(calc)
def parse(self, opts, args):
mp = opts.monkhorst_pack
if mp is not None:
if mp[-1].lower() == 'g':
kpts = np.array([int(k) for k in mp[:-1].split(',')])
shift = 0.5 * ((kpts + 1) % 2) / kpts
self.kpts = monkhorst_pack(kpts) + shift
else:
self.kpts = [int(k) for k in mp.split(',')]
self.kptdensity = opts.k_point_density
if opts.parameters:
self.kwargs.update(str2dict(opts.parameters))
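# Illustrative usage sketch (not part of ASE): shows how CalculatorFactory turns
# the k-point density into a Monkhorst-Pack grid for a periodic cell.  The cubic
# Cu cell and the helper name are invented for this example.
def _example_kpts_estimate():
    from ase import Atoms
    from ase.calculators.emt import EMT
    atoms = Atoms('Cu', cell=(3.6, 3.6, 3.6), pbc=True)
    factory = CalculatorFactory(EMT, 'EMT', None, kptdensity=3.0)
    # 2*pi*|b_i|*density per axis, rounded to the nearest even integer
    return factory.calculate_kpts(atoms)    # [6, 6, 6] for this cell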
# Recognized names of calculators sorted alphabetically:
calcnames = ['abinit', 'aims', 'asap', 'castep', 'dftb', 'elk', 'emt',
'exciting', 'fleur', 'gpaw', 'gaussian', 'hotbit', 'jacapo',
'lammps', 'lj', 'mopac', 'morse',
'nwchem', 'siesta', 'turbomole', 'vasp']
classnames = {'asap': 'EMT',
'aims': 'Aims',
'elk': 'ELK',
'emt': 'EMT',
'fleur': 'FLEUR',
'gaussian': 'Gaussian',
'jacapo': 'Jacapo',
'lammps': 'LAMMPS',
'lj': 'LennardJones',
'mopac': 'Mopac',
'morse': 'MorsePotential',
'nwchem': 'NWChem',
'vasp': 'Vasp'}
def calculator_factory(name, **kwargs):
"""Create an ASE calculator factory."""
if name == 'abinit':
from ase.calculators.abinit import Abinit
return CalculatorFactory(Abinit, 'Abinit', 'label', **kwargs)
if name == 'aims':
from ase.calculators.aims import Aims
return CalculatorFactory(Aims, 'aims', 'label', **kwargs)
if name == 'nwchem':
from ase.calculators.nwchem import NWChem
return CalculatorFactory(NWChem, 'NWChem', 'label', 'no k-points',
**kwargs)
if name == 'asap':
from asap3 import EMT
return CalculatorFactory(EMT, 'Asap', None, 'no k-points', **kwargs)
if name == 'elk':
from ase.calculators.elk import ELK
return CalculatorFactory(ELK, 'ELK', 'label', **kwargs)
if name == 'fleur':
from ase.calculators.fleur import FLEUR
return CalculatorFactory(FLEUR, 'FLEUR', 'workdir', **kwargs)
if name == 'gpaw':
from gpaw.factory import GPAWFactory
return GPAWFactory(**kwargs)
if name == 'hotbit':
from hotbit import Calculator
return CalculatorFactory(Calculator, 'Hotbit', 'txt', 'no k-points',
**kwargs)
if name == 'jacapo':
from ase.calculators.jacapo import Jacapo
return CalculatorFactory(Jacapo, 'Jacapo', 'nc', **kwargs)
if name == 'vasp':
from ase.calculators.vasp import Vasp
return CalculatorFactory(Vasp, 'Vasp', None, **kwargs)
classname = classnames.get(name, name.title())
module = __import__('ase.calculators.' + name, {}, None, [classname])
Class = getattr(module, classname)
if name in ['emt', 'gaussian', 'lammps', 'lj', 'mopac', 'morse']:
kpts = 'no k-points'
else:
kpts = None
if name in ['emt', 'lj', 'morse']:
label = None
else:
label = 'label'
return CalculatorFactory(Class, classname, label, kpts, **kwargs)
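# Illustrative usage sketch (not part of ASE): creating a factory by name and
# using it to build a calculator.  The Cu dimer and the run label are invented
# for this example.
def _example_calculator_factory_usage():
    from ase import Atoms
    atoms = Atoms('Cu2', positions=[(0, 0, 0), (0, 0, 2.5)])
    factory = calculator_factory('emt')      # EMT is registered with 'no k-points' and no label
    calc = factory('cu-dimer', atoms)
    atoms.set_calculator(calc)
    return atoms.get_potential_energy()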
|
alexei-matveev/ase-local
|
ase/tasks/calcfactory.py
|
Python
|
gpl-2.0
| 6,674 | 0.00015 |
import os
import sys
if os.environ.get("DJANGO_PRODUCTION_MODE"):
from settings.cloud import *
else:
# When not using production mode try to load local.py
try:
from settings.local import *
except ImportError:
sys.stderr.write(
"Couldn't import settings.local, have you created it from settings/local.py.example ?\n"
)
sys.exit(1)
|
thorgate/django-project-template
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/__init__.py
|
Python
|
isc
| 392 | 0.002551 |
import time
import json
import pytz
from datetime import datetime, timedelta
from django.utils import timezone
from django.conf import settings
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from ..models.security import (
SecurityLoginAttemptIncorrect, SecurityLoginAttemptCorrect
)
@api_view(['GET'])
@permission_classes((IsAdminUser, ))
def correctlogins_data(request):
date_start_raw = request.GET.get('date_start')
date_end_raw = request.GET.get('date_end')
date_start_tz = None
date_end_tz = None
if not date_start_raw or not date_end_raw:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
else:
date_start = datetime.fromtimestamp(int(date_start_raw))
date_start_tz = pytz.timezone(settings.TIME_ZONE).localize(date_start, is_dst=None)
date_end = datetime.fromtimestamp(int(date_end_raw))
date_end_tz = pytz.timezone(settings.TIME_ZONE).localize(date_end, is_dst=None)
if date_start_tz == date_end_tz:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
count_hosts = []
temp_hosts = {}
temp_users = {}
dates = []
values = SecurityLoginAttemptCorrect.objects.filter(time__range=[date_start_tz, date_end_tz])
count_correct_attempt = 0
for p in values:
value = json.loads(p.value)
attempt_count = 0
for host, v in value.get("hosts", {}).items():
attempt_count += v.get("count", 0)
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if host in temp_hosts:
temp_hosts[host]["count"] = temp_hosts[host]["count"] + v.get("count", 0)
temp_hosts[host]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_hosts[host] = {
"host": host,
"count": v.get("count", 0),
"last_date": date_tz.strftime("%b %d %H:%M")
}
for username, v in value.get("users", {}).items():
attempt_count += v.get("count", 0)
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if username in temp_users:
temp_users[username]["count"] = temp_users[username]["count"] + v.get("count", 0)
temp_users[username]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_users[username] = {
"username": username,
"count": v.get("count", 0),
"last_date": date_tz.strftime("%b %d %H:%M")
}
        count_correct_attempt += attempt_count
        count_hosts.append(attempt_count)
dates.append(timezone.localtime(p.time).strftime("%b %d %H:%M"))
hosts = []
for i in temp_hosts:
hosts.append(temp_hosts[i])
if hosts:
hosts.sort(key=lambda x: x["count"], reverse=True)
hosts = hosts[:100]
users = []
for i in temp_users:
users.append(temp_users[i])
if users:
users.sort(key=lambda x: x["count"], reverse=True)
users = users[:100]
date_range = {
"start": time.mktime(timezone.localtime(date_start_tz).timetuple()), # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"start_date": time.mktime(timezone.localtime(date_start_tz).timetuple()) + 10, # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"end_date": time.mktime(timezone.localtime(date_end_tz).timetuple()),
}
if values:
date_range["start"] = time.mktime(timezone.localtime(values[0].time).timetuple())
start_obj = SecurityLoginAttemptCorrect.objects.all().first()
if start_obj:
date_range["start_date"] = time.mktime(timezone.localtime(start_obj.time).timetuple())
if date_range["start_date"] == date_range["end_date"]:
date_range["end_date"] += 10
return Response({
"values": [{
"data": count_hosts,
"label": 'Number of login'
}],
"dates": dates,
"date_range": date_range,
"count_correct_attempt": count_correct_attempt,
"hosts": hosts,
"users": users
}, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes((IsAdminUser, ))
def incorrectlogins_data(request):
date_start_raw = request.GET.get('date_start')
date_end_raw = request.GET.get('date_end')
date_start_tz = None
date_end_tz = None
if not date_start_raw or not date_end_raw:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
else:
date_start = datetime.fromtimestamp(int(date_start_raw))
date_start_tz = pytz.timezone(settings.TIME_ZONE).localize(date_start, is_dst=None)
date_end = datetime.fromtimestamp(int(date_end_raw))
date_end_tz = pytz.timezone(settings.TIME_ZONE).localize(date_end, is_dst=None)
if date_start_tz == date_end_tz:
now = timezone.now()
date_start_tz = now - timedelta(hours=24)
date_end_tz = now
count_incorrect_attepmt = 0
count_hosts = []
temp_hosts = {}
temp_users = {}
dates = []
values = SecurityLoginAttemptIncorrect.objects.filter(time__range=[date_start_tz, date_end_tz])
for p in values:
value = json.loads(p.value)
attempt_count = 0
for host, v in value.get("hosts", {}).items():
attempt_count += v.get("count", 0)
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if host in temp_hosts:
temp_hosts[host]["count"] = temp_hosts[host]["count"] + v.get("count", 0)
temp_hosts[host]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_hosts[host] = {
"host": host,
"count": v.get("count", 0),
"last_date": date_tz.strftime("%b %d %H:%M")
}
for user, v in value.get("users", {}).items():
attempt_count += v.get("count")
raw_date = v.get("last_date")
date_tz = None
if raw_date:
date = datetime.fromtimestamp(int(raw_date))
date_tz = pytz.timezone(settings.TIME_ZONE).localize(date, is_dst=None)
if user in temp_users:
temp_users[user]["count"] = temp_users[user]["count"] + v.get("count")
temp_users[user]["last_date"] = date_tz.strftime("%b %d %H:%M")
else:
temp_users[user] = {
"username": user,
"count": v.get("count"),
"last_date": date_tz.strftime("%b %d %H:%M")
}
count_incorrect_attepmt += attempt_count
count_hosts.append(attempt_count)
dates.append(timezone.localtime(p.time).strftime("%b %d %H:%M"))
hosts = []
for i in temp_hosts:
hosts.append(temp_hosts[i])
if hosts:
hosts.sort(key=lambda x: x["count"], reverse=True)
hosts = hosts[:100]
users = []
for i in temp_users:
users.append(temp_users[i])
if users:
users.sort(key=lambda x: x["count"], reverse=True)
users = users[:100]
date_range = {
"start": time.mktime(timezone.localtime(date_start_tz).timetuple()), # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"start_date": time.mktime(timezone.localtime(date_start_tz).timetuple()) + 10, # time.mktime(timezone.localtime(timezone.now()).timetuple()),
"end_date": time.mktime(timezone.localtime(date_end_tz).timetuple()),
}
if values:
date_range["start"] = time.mktime(timezone.localtime(values[0].time).timetuple())
start_obj = SecurityLoginAttemptIncorrect.objects.all().first()
if start_obj:
date_range["start_date"] = time.mktime(timezone.localtime(start_obj.time).timetuple())
if date_range["start_date"] == date_range["end_date"]:
date_range["end_date"] += 10
return Response({
"values": [{
"data": count_hosts,
"label": 'Number of attempt incorrect login'
}],
"dates": dates,
"date_range": date_range,
"count_incorrect_attepmt": count_incorrect_attepmt,
"hosts": hosts,
"users": users
}, status=status.HTTP_200_OK)
|
dspichkin/djangodashpanel
|
djangodashpanel/security/views.py
|
Python
|
gpl-3.0
| 9,113 | 0.002743 |
import smbl
__RULES=set()
def register_rule(rule):
registered_rules=[r.encode() for r in get_registered_rules()]
if rule.encode() not in registered_rules:
__RULES.add(rule)
def get_registered_rules():
return list(__RULES)
class Rule:
def __init__(self,input,output,run):
self.__input=input
self.__output=output
self.__run=run
register_rule(self)
def get_input(self):
return self.__input
def get_output(self):
return self.__output
def run(self):
self.__run()
def encode(self):
return "{} {}".format(str(self.__input),str(self.__output))
|
karel-brinda/smbl
|
smbl/utils/rule.py
|
Python
|
mit
| 572 | 0.050699 |
import numpy as np
from numpy.testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import pytest
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE, PREDICTOR_RECORD_DTYPE, ALMOST_INF, X_BINNED_DTYPE,
X_BITSET_INNER_DTYPE, X_DTYPE)
from sklearn.ensemble._hist_gradient_boosting._bitset import (
set_bitset_memoryview, set_raw_bitset_from_binned_bitset)
@pytest.mark.parametrize('n_bins', [200, 256])
def test_regression_dataset(n_bins):
X, y = make_regression(n_samples=500, n_features=10, n_informative=5,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42)
mapper = _BinMapper(n_bins=n_bins, random_state=42)
X_train_binned = mapper.fit_transform(X_train)
# Init gradients and hessians to that of least squares loss
gradients = -y_train.astype(G_H_DTYPE)
hessians = np.ones(1, dtype=G_H_DTYPE)
min_samples_leaf = 10
max_leaf_nodes = 30
grower = TreeGrower(X_train_binned, gradients, hessians,
min_samples_leaf=min_samples_leaf,
max_leaf_nodes=max_leaf_nodes, n_bins=n_bins,
n_bins_non_missing=mapper.n_bins_non_missing_)
grower.grow()
predictor = grower.make_predictor(
binning_thresholds=mapper.bin_thresholds_)
known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map)
assert r2_score(y_train, y_pred_train) > 0.82
y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map)
assert r2_score(y_test, y_pred_test) > 0.67
@pytest.mark.parametrize('num_threshold, expected_predictions', [
(-np.inf, [0, 1, 1, 1]),
(10, [0, 0, 1, 1]),
(20, [0, 0, 0, 1]),
(ALMOST_INF, [0, 0, 0, 1]),
(np.inf, [0, 0, 0, 0]),
])
def test_infinite_values_and_thresholds(num_threshold, expected_predictions):
# Make sure infinite values and infinite thresholds are handled properly.
# In particular, if a value is +inf and the threshold is ALMOST_INF the
# sample should go to the right child. If the threshold is inf (split on
# nan), the +inf sample will go to the left child.
X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes[0]['left'] = 1
nodes[0]['right'] = 2
nodes[0]['feature_idx'] = 0
nodes[0]['num_threshold'] = num_threshold
# left child
nodes[1]['is_leaf'] = True
nodes[1]['value'] = 0
# right child
nodes[2]['is_leaf'] = True
nodes[2]['value'] = 1
binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
predictor = TreePredictor(
nodes, binned_cat_bitsets, raw_categorical_bitsets)
predictions = predictor.predict(X, known_cat_bitset, f_idx_map)
assert np.all(predictions == expected_predictions)
@pytest.mark.parametrize(
'bins_go_left, expected_predictions', [
([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
([3, 5, 6], [0, 0, 0, 1, 0, 1])
])
def test_categorical_predictor(bins_go_left, expected_predictions):
# Test predictor outputs are correct with categorical features
X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
nodes[0]['left'] = 1
nodes[0]['right'] = 2
nodes[0]['feature_idx'] = 0
nodes[0]['is_categorical'] = True
nodes[0]['missing_go_to_left'] = True
# left child
nodes[1]['is_leaf'] = True
nodes[1]['value'] = 1
# right child
nodes[2]['is_leaf'] = True
nodes[2]['value'] = 0
binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
for go_left in bins_go_left:
set_bitset_memoryview(binned_cat_bitsets[0], go_left)
set_raw_bitset_from_binned_bitset(raw_categorical_bitsets[0],
binned_cat_bitsets[0], categories)
predictor = TreePredictor(nodes, binned_cat_bitsets,
raw_categorical_bitsets)
# Check binned data gives correct predictions
prediction_binned = predictor.predict_binned(X_binned,
missing_values_bin_idx=6)
assert_allclose(prediction_binned, expected_predictions)
# manually construct bitset
known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32)
f_idx_map = np.array([0], dtype=np.uint32)
# Check with un-binned data
predictions = predictor.predict(categories.reshape(-1, 1),
known_cat_bitsets, f_idx_map)
assert_allclose(predictions, expected_predictions)
# Check missing goes left because missing_values_bin_idx=6
X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
predictions = predictor.predict_binned(X_binned_missing,
missing_values_bin_idx=6)
assert_allclose(predictions, [1])
# missing and unknown go left
predictions = predictor.predict(np.array([[np.nan, 17]], dtype=X_DTYPE).T,
known_cat_bitsets, f_idx_map)
assert_allclose(predictions, [1, 1])
|
kevin-intel/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py
|
Python
|
bsd-3-clause
| 6,256 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import layers
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
[4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
[4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
[4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
[4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
[4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
[4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
[4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
[4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
[4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
[4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
[4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
[4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
[1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
[1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
[1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
[3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
[3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
[3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
[1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
[1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
[3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
[1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
[3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
[1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
[1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
[3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
[1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
[3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
[1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
[1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
[4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
[4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
[4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
[4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
[4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
[4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
[4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
[4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
[4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
[4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
[4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
[4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
[4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float32, dtypes.float64]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
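      # Concretely, ConstructAndTestGradient below keeps the float32 Jacobian
      # from the first iteration as reference_jacob_t and compares the float16
      # Jacobian against it rather than against numerical gradients.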
return [dtypes.float32, dtypes.float16, dtypes.float64]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype, use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
dilations: Dilated rate: [col_dilation, row_dilation]
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
Returns:
Symbolic tensor value that can be used to execute the computation
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
conv = nn_ops.conv2d(
t1,
t2,
dilations=dilations,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = self.evaluate(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5)
def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
stride, dilation, padding, data_format,
use_gpu):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections.Iterable):
strides = list(stride)
else:
strides = [stride, stride]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
computed = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, dilations):
expected_results = []
computed_results = []
for data_format, use_gpu in GetTestConfigs():
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
data_format, use_gpu)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.info("expected = ", e_value)
tf_logging.info("actual = ", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-4)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides, padding,
expected):
tensors = []
dilations = [1, 1]
for (data_format, use_gpu) in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu)
tensors.append(result)
values = self.evaluate(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
tf_logging.info("expected = ", expected)
tf_logging.info("actual = ", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self):
expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID")
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(self):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(self):
expected_output = [65, 95, 275, 305]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(self):
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11])
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60])
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(self):
self._VerifyDilatedConvValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID")
# TODO(yzhwang): this currently fails.
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu, err):
total_output_size = 1
total_filter_size = 1
for s in output_sizes:
total_output_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_filter_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
with test_util.device(use_gpu):
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0, t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.info("expected = ", expected)
tf_logging.info("actual = ", value)
self.assertArrayNear(expected, value.flatten(), err)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
if data_format == "NCHW":
new_input_sizes = test_util.NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
strides, padding, expected, data_format,
use_gpu):
total_input_size = 1
total_output_size = 1
for s in input_sizes:
total_input_size *= s
for s in output_sizes:
total_output_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
explicit_strides = [1] + strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
explicit_strides = test_util.NHWCToNCHW(explicit_strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=explicit_strides,
padding=padding,
data_format=data_format)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.info("expected = ", expected)
tf_logging.info("actual = ", value)
self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
# Testing for backprops
def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
total_input_size = 1
total_filter_size = 1
for s in input_sizes:
total_input_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t1)[0]
conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
# "values" consists of two tensors for two backprops
value = sess.run(conv)
value_2 = sess.run(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.info("expected = ", value_2)
tf_logging.info("actual = ", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
total_input_size = 1
total_filter_size = 1
for s in input_sizes:
total_input_size *= s
for s in filter_sizes:
total_filter_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_input_size + 1)]
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]
value = sess.run(conv)
value_2 = sess.run(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.info("expected = ", value_2)
tf_logging.info("actual = ", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Gradient checkers
def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
filter_cols, in_depth, out_depth, stride_rows,
stride_cols, padding, test_input, data_format,
use_gpu):
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
else:
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
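    # Worked example of the formulas above (illustrative values only): with
    # input_rows=5, filter_rows=3 and stride_rows=2,
    #   VALID: (5 - 3 + 2) // 2 = 2 output rows
    #   SAME:  (5 + 2 - 1) // 2 = 3 output rows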
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=dtype, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
if data_format == "NCHW":
new_input_tensor = test_util.NHWCToNCHW(input_tensor)
strides = test_util.NHWCToNCHW(strides)
else:
new_input_tensor = input_tensor
conv = nn_ops.conv2d(
new_input_tensor,
filter_tensor,
strides,
padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
tf_logging.info("conv_2d gradient error = ", err)
self.assertLess(err, 0.002)
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
strides=[1, 1, 1, 1],
padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1],
padding="SAME")
def testOpEdgeCases(self):
with self.cached_session() as sess:
# Illegal strides.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[2, 1, 1, 1],
padding="SAME"))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"strides in the batch and depth"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 2],
padding="SAME"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1],
padding="VALID"))
class DepthwiseConv2DTest(test.TestCase):
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.cached_session() as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
tf_logging.info("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
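    # The same four (0, 0) outputs can be reproduced with a small NumPy sketch
    # (illustrative only; not executed as part of this test):
    #   import numpy as np
    #   x = np.arange(1.0, 13.0).reshape(1, 2, 3, 2)  # NHWC input
    #   w = np.arange(1.0, 17.0).reshape(2, 2, 2, 2)  # [h, w, in_ch, mult]
    #   out = [np.sum(x[0, :2, :2, c] * w[:, :, c, m])
    #          for c in range(2) for m in range(2)]
    #   # out == [196.0, 216.0, 272.0, 296.0]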
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self,
tensor_in_sizes,
depthwise_filter_in_sizes,
pointwise_filter_in_sizes,
stride,
padding,
expected,
data_format="NHWC"):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
with self.cached_session(use_gpu=True) as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
real_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
conv = nn_impl.separable_conv2d(
real_t1,
f1,
f2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = sess.run(conv)
tf_logging.info("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-3)
self.assertShapeEqual(value, conv)
def _testSeparableConv2D(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
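    # Evaluated per output position: 2*3*2*2 + 6*7*1*1 = 24 + 42 = 66
    # multiply-adds for the depthwise + pointwise pair versus 2*7*2*2 = 56 for
    # a single dense 2x2 convolution; at these tiny channel counts the
    # separable form is not actually cheaper, only asymptotically so.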
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2D(self):
self._testSeparableConv2D("NHWC")
def disabledtestSeparableConv2DNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2D("NCHW")
def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
1923.75, 2007.0, 2090.25, 2173.5
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2DEqualInputOutputDepth(self):
self._testSeparableConv2DEqualInputOutputDepth("NHWC")
def testSeparableConv2DEqualInputOutputDepthNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2DEqualInputOutputDepth("NCHW")
class DeepConv2DTest(test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.cached_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
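      # TF_USE_DEEP_CONV2D toggles TensorFlow's specialized CPU "deep conv"
      # (Winograd-transform) path for eligible 3x3, stride-1 kernels (assumed
      # behavior of the flag); the same graph is evaluated with the flag off
      # and on, and the two results are compared below.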
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = sess.run([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = sess.run([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
def benchmarkGPUConvStackFirst(self):
# Benchmark the first iteration of a conv-net with many identical conv
# operations.
if not test.is_gpu_available():
return
with ops.Graph().as_default(), session_lib.Session() as session:
batch_size = 1
timesteps = 600
features = 1
inputs = random_ops.random_uniform(
[batch_size, 1, timesteps, features], seed=1234)
num_outputs_list = [512] * 40 + [1]
kernel_w = 3
x = inputs
for num_outputs in num_outputs_list:
x = layers.convolution2d(x, num_outputs, [1, kernel_w])
outputs = x
variables.global_variables_initializer().run()
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()
session.run(outputs)
wall_time = time.time() - start
self.report_benchmark(
name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s", (input_size, filter_size,
stride, padding))
return
tf_logging.info("Testing InceptionFwd %s", (input_size, filter_size, stride,
padding))
self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
def Test(self):
if stride == 1:
tf_logging.info("Testing InceptionFwd with dilations %s",
(input_size, filter_size, stride, padding))
self._VerifyDilatedConvValues(
tensor_in_sizes=input_size,
filter_in_sizes=filter_size,
strides=[stride, stride],
dilations=[2, 2],
padding=padding)
return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
padding,
gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
return
tf_logging.info("Testing InceptionBackInput %s",
(input_size, filter_size, output_size, stride, padding))
self._CompareBackpropInput(input_size, filter_size, output_size,
[stride, stride], padding)
return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
padding, gpu_only=False):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
return
tf_logging.info("Testing InceptionBackFilter %s",
(input_size, filter_size, output_size, strides, padding))
self._CompareBackFilter(input_size, filter_size, output_size, strides,
padding)
return Test
if __name__ == "__main__":
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(GetShrunkInceptionShapes()):
setattr(Conv2DTest, "testInceptionFwd_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(input_size_, filter_size_, stride_,
padding_)))
setattr(
Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
input_size_, filter_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(input_size_, filter_size_,
output_size_, stride_, padding_)))
setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(input_size_, filter_size_,
output_size_, [stride_, stride_],
padding_)))
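  # The setattr calls above attach the generated closures to Conv2DTest as
  # methods named e.g. testInceptionFwd_0, testInceptionFwd_1, ..., so the
  # test runner discovers one test per shrunk Inception shape.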
  # TODO(b/35359731)
  # The Fwd, BackInput, and BackFilter tests below check that, for certain
  # input parameter sets, the winograd nonfused algorithm is excluded from
  # conv autotune. If the winograd nonfused algorithm were instead offered as
  # an autotune option and the cuDNN version is smaller than 7, the following
  # tests would fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
gpu_only=True)))
setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
test_util.run_in_graph_and_eager_modes(
GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
gpu_only=True)))
test.main()
|
hehongliang/tensorflow
|
tensorflow/python/kernel_tests/conv_ops_test.py
|
Python
|
apache-2.0
| 73,074 | 0.005351 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20141128_0831'),
]
operations = [
migrations.AlterField(
model_name='acl',
name='user',
field=models.ForeignKey(to='api.UserData', related_name='acls'),
),
]
|
Virako/authapi
|
authapi/api/migrations/0004_auto_20141128_0914.py
|
Python
|
agpl-3.0
| 419 | 0 |
# Benchmarks for nibabel
|
ME-ICA/me-ica
|
meica.libs/nibabel/benchmarks/__init__.py
|
Python
|
lgpl-2.1
| 25 | 0 |
# ex:set ts=4 sw=4: <- for vim
#
# EPANET File Utility
# Uses EPAENTOutputFile.py to read the EPANET output file into memory and
# then displays the content in different ways.
#
# Dependencies:
# - Python 2.6 or 2.7 (32- or 64-bit)
# - wxPython 3.0.0 (32- or 64-bit to match installed version of Python)
# - EPANETOutputFile/EPANETOutputFile.py
#
# Available translations/locales:
# en_AU.UTF-8
import wx
import wx.gizmos
import wx.propgrid as wxpg
import os
import platform
import codecs
import sys
import gettext
from EPANETOutputFile import EPANETOutputFile
from DataPage import DataPage
from TablePage import TablePage
from ExportPage import ExportPage
_hasXLRD = True
try:
import xlrd
#_hasXLRD = False
except ImportError:
_hasXLRD = False
_hasXLWT = True
try:
import xlwt
#_hasXLWT = False
except ImportError:
_hasXLWT = False
_hasXLUTILS = True
try:
import xlutils
#_hasXLUTILS = False
except ImportError:
_hasXLUTILS = False
def main():
# New versions of wxPython require us to create the app very early, so...
# Create a new app, don't redirect stdout/stderr to a window.
app = wx.App(False)
# mostly taken from the wxPython internationalisation example...
# but in the end not using wx Locale because of the difficulty of
# mapping from language name (string) to wx language constant (number)
# initialise language settings:
path = sys.path[0].decode(sys.getfilesystemencoding())
try:
langIni = codecs.open(os.path.join(path,u'language.ini'),'r', 'utf-8')
except IOError:
#language = u'en' #defaults to english
#print('Could not read language.ini')
        language = None
else:
language = langIni.read()
locales = {
u'en' : (wx.LANGUAGE_ENGLISH, u'en_US.UTF-8'),
#u'es' : (wx.LANGUAGE_SPANISH, u'es_ES.UTF-8'),
#u'fr' : (wx.LANGUAGE_FRENCH, u'fr_FR.UTF-8'),
}
langdir = os.path.join(path,u'locale')
if language is None:
Lang = gettext.translation(u'EPANETFileUtility', langdir,
fallback=True)
Lang.install(unicode=1)
if Lang.__class__.__name__ == 'NullTranslations' and str(Lang.__class__) == 'gettext.NullTranslations':
print('Language not found')
else:
try:
language = Lang._info['language']
print('Language %s found.' % language)
except (KeyError):
print('Language found (details not available).')
# Lang.info() content seems to depend on the .mo file containing
# the correct language information. If it is not set, the list
# returned is empty and there doesn't seem to be any way to find
# the information
#print('Lang.info() = %s' % Lang.info())
#language = Lang._info['language']
# TODO convert from language name (string) to wx.LANGUAGE_... (number)
#mylocale = wx.Locale(language, wx.LOCALE_LOAD_DEFAULT)
else:
Lang = gettext.translation(u'EPANETFileUtility', langdir, languages=[language])
Lang.install(unicode=1)
#mylocale = wx.Locale(locales[language][0], wx.LOCALE_LOAD_DEFAULT)
if platform.system() == 'Linux':
try:
# to get some language settings to display properly:
os.environ['LANG'] = locales[language][1]
except (ValueError, KeyError):
pass
# A Frame is a top-level window.
frame = MyFrame(None, _("EPANET File Utility"))
app.MainLoop()
def getNextImageID(count):
imID = 0
while True:
yield imID
imID += 1
if imID == count:
imID = 0
"""
Our main panel contains the following:
- a menu bar
- a Frame with a box sizer containing a MyListbook with pictures down the
LHS for the Data/Tables/Graphs/Export options.
At the start, we put a box sizer and a Panel containing a box sizer
with a ColouredPanel in each page: this
must be replaced with valid content when a file is loaded
- TODO allow file name to be specified on the command line
- at startup time, we open a data file and build an EPANETOutputFile object
which we display by:
- creating a box sizer in the MyListbook 'Data' page and adding to it:
- a PropertyGridManager with 4 pages, viz:
- Prolog: properties read from the prolog of the data file but not
including the node and link information
- Energy Usage: a single property
- Dynamic Results: a property grid
- Epilog: a property grid
- a box sizer (treesizer) in which we switch TreeListCtrls as necessary
for the different pages of the PropertyGridManager
"""
class MyFrame(wx.Frame):
""" We simply derive a new class of Frame. """
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(800,600),
#style = wx.SIMPLE_BORDER | wx.TAB_TRAVERSAL
)
self.control = MyListbook(self, -1, None)
self.basetitle = title
self.Sizer = wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self.control, 1, wx.GROW)
self.dataPage = None
self.tablePage = None
self.exportPage = None
self.dirname = ''
self.filename = None
self.epanetoutputfile = None
il = wx.ImageList(80, 80)
bmp = wx.Bitmap('images/led_circle_yellow.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_orange.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap( 'images/led_circle_blue.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_green.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_purple.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_red.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_grey.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
bmp = wx.Bitmap('images/led_circle_black.png', wx.BITMAP_TYPE_PNG)
il.Add(bmp)
self.control.AssignImageList(il)
imageIdGenerator = getNextImageID(il.GetImageCount())
# Now make a bunch of panels for the list book
colourList = [ "Yellow", "Coral", "Medium orchid", "Green", ]
titleList = [ _("Data"),
_("Tables"),
_("Graphs"),
_("Export"),
]
for i in range(len(titleList)):
colour = colourList[i]
title = titleList[i]
if i == 0:
self.dataPage = win = DataPage(self, self.control, colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
elif i == 1:
self.tablePage = win = TablePage(self, self.control, colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
elif i == 2:
win = self.makeColourPanel(colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
sizer = wx.BoxSizer(wx.VERTICAL)
win.win.SetSizer(sizer)
st = wx.StaticText(win.win, -1,
_(
"""EPANET File Utility
Displaying graphs is not yet supported."""))
sizer.Add(st, 1, wx.GROW | wx.ALL, 10)
elif i == 3:
self.exportPage = win = ExportPage(self, self.control, colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
else:
win = self.makeColourPanel(colour)
self.control.AddPage(win, title, imageId=imageIdGenerator.next())
win = self.control.GetPage(i)
st = wx.StaticText(win.win, -1,
_("EPANET File Utility."),
wx.Point(10, 10))
#win = self.makeColourPanel(colour)
#st = wx.StaticText(win.win, -1, "this is a sub-page", (10,10))
#self.control.AddSubPage(win, 'a sub-page', imageId=imageIdGenerator.next())
self.control.Bind(wx.EVT_LISTBOOK_PAGE_CHANGED, self.control.OnPageChanged)
self.control.Bind(wx.EVT_LISTBOOK_PAGE_CHANGING, self.control.OnPageChanging)
# A Statusbar in the bottom of the window used with menu help text, etc.
self.CreateStatusBar()
self.SetStatusBarPane(0)
# Setting up the menu.
filemenu= wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.
# wx.ID_OPEN
menuOpen = filemenu.Append(wx.ID_OPEN, _("&Open..."),_(" Open an EPANET output file"))
self.Bind(wx.EVT_MENU, self.OnOpen, menuOpen)
menuAbout = filemenu.Append(wx.ID_ABOUT, _("&About"),_(" Information about this program"))
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
filemenu.AppendSeparator()
menuExit = filemenu.Append(wx.ID_EXIT,_("E&xit"),_(" Terminate the program"))
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,_("&File")) # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
self.Show(True)
# we need a Listbook where each page contains a PropertyGrid
        # as long as Property Grids can have more than 2 columns (for diffing).
# possibly a Treebook would also work.
# we need to be able to show graphs as well as just tabular output
# and also options to export the data in a controlled way.
# TODO iff no filename has been given, open a file
#self.OnOpen(None)
# after upgrading to wxPython 3.0.0 we can't call OnOpen any more
# as the Open panel displays and then closes with a cancel
# message. Instead we bind to the idle event which is
# called after startup is complete. This works.
self.Bind(wx.EVT_IDLE, self.OnStartup)
def OnStartup(self, event):
self.Unbind(wx.EVT_IDLE)
self.OnOpen(event)
event.Skip()
def OnOpen(self, event):
""" Open a file"""
dlg = wx.FileDialog(self, _("Choose a file"), self.dirname, "", "*", wx.OPEN)
try:
result = dlg.ShowModal()
if result == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
progress = MyProgressDialog(200)
try:
progress.SetStepLimits(0,100)
self.epanetoutputfile = eof = EPANETOutputFile.EPANETOutputFile([
'-vs',
'--demo_all',
# os.path.join(self.dirname, self.filename)
self.dirname+os.sep+self.filename],
progress)
progress.SetStepLimits(100,200)
progress.Update(1,_('Displaying data'))
# configure the data, tables and export pages
self.dataPage.OnOpen(event, progress)
self.tablePage.OnOpen(event, progress)
self.exportPage.OnOpen(event, progress)
#self.SetAutoLayout(True)
self.Layout()
progress.Update(100,_('Done'))
#except Exception as ex:
# #print(ex)
# raise ex
finally:
progress.Hide()
progress.Destroy()
self.SetTitle('%s: %s' % (self.basetitle, self.filename))
else:
#print("FileDialog.ShowModal selection didn't work: returned %s" % result)
pass
except Exception as ex:
print(ex)
errdlg = wx.MessageDialog(self, str(ex), _('Error'), style=wx.OK | wx.ICON_ERROR)
errdlg.ShowModal()
errdlg.Destroy()
finally:
dlg.Destroy()
def OnAbout(self, event):
# A message dialog box with an OK button. wx.OK is a standard ID in wxWidgets.
dlg = wx.MessageDialog( self, _("EPANET File Utility by Mark Morgan, WaterSums."), _("About EPANET File Utility"), wx.OK)
dlg.ShowModal() # Show it
dlg.Destroy() # finally destroy it when finished.
def OnExit(self, event):
self.Close(True)
def makeColourPanel(self, colour):
from ColouredPanel import ColouredPanel
p = wx.Panel(self.control, -1)
p.win = ColouredPanel(p, colour)
p.Sizer = wx.BoxSizer(wx.VERTICAL)
p.Sizer.Add(p.win, 1, wx.GROW)
return p
class MyProgressDialog(wx.ProgressDialog):
def __init__(self, maxval):
        self.maxval = maxval
        self.rangemin = 0
        self.rangemax = maxval
self.progress = wx.ProgressDialog(
_('Loading output file...'),
_('Reading output file...'),
self.maxval,
style = wx.PD_APP_MODAL
| wx.PD_AUTO_HIDE
| wx.PD_ELAPSED_TIME
| wx.PD_ESTIMATED_TIME
| wx.PD_REMAINING_TIME
)
def SetStepLimits(self, rangemin, rangemax):
# make sure these limits are in the range 0-maxval
self.rangemin = max(0, min(self.maxval, rangemin))
self.rangemax = max(self.rangemin, min(self.maxval, rangemax))
#print('MyProgress step limits: %d to %d' % (rangemin, rangemax))
def Update(self, value, newmsg = None):
# make sure value is in the range 0-100 (%)
value = max(0, min(100, value))
value = self.rangemin + int(float(value) *
float(self.rangemax - self.rangemin) / 100.0)
#print('MyProgress value: %d' % value)
self.progress.Update(value, newmsg)
def Hide(self):
self.progress.Hide()
def Destroy(self):
self.progress.Destroy()
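# A minimal usage sketch (assumption, mirroring how MyFrame.OnOpen above drives the
# dialog): callers report progress as a 0-100 percentage within the current step, and
# Update() maps that onto the slice of the overall 0-200 gauge reserved for the step.
#
#     progress = MyProgressDialog(200)
#     progress.SetStepLimits(0, 100)     # first phase: reading the output file
#     progress.Update(50)                # gauge shows 50 of 200
#     progress.SetStepLimits(100, 200)   # second phase: displaying the data
#     progress.Update(50)                # gauge shows 150 of 200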
class MyListbook(wx.Listbook):
def __init__(self, parent, id, log):
wx.Listbook.__init__(self, parent, id, style=
#wx.BK_DEFAULT
#wx.BK_TOP
#wx.BK_BOTTOM
wx.BK_LEFT
#wx.BK_RIGHT
#, size = wx.DefaultSize
)
self.log = log
def OnPageChanged(self, event):
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.GetSelection()
#print('OnPageChanged, old:%d, new:%d, sel:%d\n' % (old, new, sel))
event.Skip()
def OnPageChanging(self, event):
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.GetSelection()
#print('OnPageChanging, old:%d, new:%d, sel:%d\n' % (old, new, sel))
event.Skip()
version = "0.1.0.0"
if __name__ == '__main__':
if 'unicode' not in wx.PlatformInfo:
print(_("\nInstalled wxPython version: %s\nYou need a unicode build of wxPython to run this application.\n")%wx.version())
else:
print(_("\n%(EFUver)s, Installed wxPython version: %(wxver)s\n") %
{'EFUver': version, 'wxver': wx.version()})
if _hasXLRD:
print(_("xlrd imported successfully"))
else:
print(_("WARNING: can't import xlrd, so Tables option will not work"))
if _hasXLWT:
print(_("xlwt imported successfully"))
else:
print(_("WARNING: can't import xlwt, so Tables option will not work"))
if _hasXLUTILS:
print(_("xlutils imported successfully"))
else:
print(_("WARNING: can't import xlutils, so saving tables may not work"))
main()
|
WaterSums/EPANETFileUtility
|
EPANETFileUtility.py
|
Python
|
mit
| 16,145 | 0.006813 |
#Working with variables
import pyaudiogame
spk = pyaudiogame.speak
MyApp = pyaudiogame.App("My Application")
#Here are some variables
#Let's first write one line of text
my_name = "Frastlin"
#now let's write a number
my_age = 42
#now let's write several lines of text
my_song = """
My application tis to be,
the coolest you've ever seen!
"""
#Magic time!
def logic(actions):
key = actions['key']
if key == "a":
#Here is our one line of text, it will speak when we press a
spk(my_name)
elif key == "s":
#Here is our number, it will speak when we press s
spk(my_age)
elif key == "d":
#Here is our multiline text example. It will speak when we press d
spk(my_song)
MyApp.logic = logic
MyApp.run()
|
frastlin/PyAudioGame
|
examples/basic_tutorial/ex1.py
|
Python
|
mit
| 712 | 0.030899 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from django.views.generic import View
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import CreateKeypair
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.forms import ImportKeypair
LOG = logging.getLogger(__name__)
class CreateView(forms.ModalFormView):
form_class = CreateKeypair
template_name = 'project/access_and_security/keypairs/create.html'
success_url = 'horizon:project:access_and_security:keypairs:download'
def get_success_url(self):
return reverse(self.success_url,
kwargs={"keypair_name": self.request.POST['name']})
class ImportView(forms.ModalFormView):
form_class = ImportKeypair
template_name = 'project/access_and_security/keypairs/import.html'
success_url = reverse_lazy('horizon:project:access_and_security:index')
def get_object_id(self, keypair):
return keypair.name
class DownloadView(TemplateView):
def get_context_data(self, keypair_name=None):
return {'keypair_name': keypair_name}
template_name = 'project/access_and_security/keypairs/download.html'
class GenerateView(View):
def get(self, request, keypair_name=None):
try:
keypair = api.nova.keypair_create(request, keypair_name)
except:
redirect = reverse('horizon:project:access_and_security:index')
exceptions.handle(self.request,
_('Unable to create keypair: %(exc)s'),
redirect=redirect)
response = http.HttpResponse(mimetype='application/binary')
response['Content-Disposition'] = \
'attachment; filename=%s.pem' % slugify(keypair.name)
response.write(keypair.private_key)
response['Content-Length'] = str(len(response.content))
return response
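# A minimal sketch (assumption, not part of this module) of the response produced above
# for a keypair named "My Key": the private key text is returned as a download with
#     Content-Disposition: attachment; filename=my-key.pem
#     Content-Length: <length of the private key text>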
|
rackerlabs/horizon
|
openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py
|
Python
|
apache-2.0
| 3,100 | 0.000645 |
"""
File-based Checkpoints implementations.
"""
import os
import shutil
from tornado.web import HTTPError
from .checkpoints import (
Checkpoints,
GenericCheckpointsMixin,
)
from .fileio import FileManagerMixin
from IPython.utils import tz
from IPython.utils.path import ensure_dir_exists
from IPython.utils.py3compat import getcwd
from IPython.utils.traitlets import Unicode
class FileCheckpoints(FileManagerMixin, Checkpoints):
"""
A Checkpoints that caches checkpoints for files in adjacent
directories.
Only works with FileContentsManager. Use GenericFileCheckpoints if
you want file-based checkpoints with another ContentsManager.
"""
checkpoint_dir = Unicode(
'.ipynb_checkpoints',
config=True,
help="""The directory name in which to keep file checkpoints
This is a path relative to the file's own directory.
By default, it is .ipynb_checkpoints
""",
)
root_dir = Unicode(config=True)
def _root_dir_default(self):
try:
return self.parent.root_dir
except AttributeError:
return getcwd()
# ContentsManager-dependent checkpoint API
def create_checkpoint(self, contents_mgr, path):
"""Create a checkpoint."""
checkpoint_id = u'checkpoint'
src_path = contents_mgr._get_os_path(path)
dest_path = self.checkpoint_path(checkpoint_id, path)
self._copy(src_path, dest_path)
return self.checkpoint_model(checkpoint_id, dest_path)
def restore_checkpoint(self, contents_mgr, checkpoint_id, path):
"""Restore a checkpoint."""
src_path = self.checkpoint_path(checkpoint_id, path)
dest_path = contents_mgr._get_os_path(path)
self._copy(src_path, dest_path)
# ContentsManager-independent checkpoint API
def rename_checkpoint(self, checkpoint_id, old_path, new_path):
"""Rename a checkpoint from old_path to new_path."""
old_cp_path = self.checkpoint_path(checkpoint_id, old_path)
new_cp_path = self.checkpoint_path(checkpoint_id, new_path)
if os.path.isfile(old_cp_path):
self.log.debug(
"Renaming checkpoint %s -> %s",
old_cp_path,
new_cp_path,
)
with self.perm_to_403():
shutil.move(old_cp_path, new_cp_path)
def delete_checkpoint(self, checkpoint_id, path):
"""delete a file's checkpoint"""
path = path.strip('/')
cp_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(cp_path):
self.no_such_checkpoint(path, checkpoint_id)
self.log.debug("unlinking %s", cp_path)
with self.perm_to_403():
os.unlink(cp_path)
def list_checkpoints(self, path):
"""list the checkpoints for a given file
This contents manager currently only supports one checkpoint per file.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_path):
return []
else:
return [self.checkpoint_model(checkpoint_id, os_path)]
# Checkpoint-related utilities
def checkpoint_path(self, checkpoint_id, path):
"""find the path to a checkpoint"""
path = path.strip('/')
parent, name = ('/' + path).rsplit('/', 1)
parent = parent.strip('/')
basename, ext = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=ext,
)
os_path = self._get_os_path(path=parent)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
with self.perm_to_403():
ensure_dir_exists(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
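    # A minimal sketch (illustrative values, not from the module): with the default
    # checkpoint_dir, a file at "notebooks/analysis.ipynb" gets its single checkpoint at
    #     <root_dir>/notebooks/.ipynb_checkpoints/analysis-checkpoint.ipynb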
def checkpoint_model(self, checkpoint_id, os_path):
"""construct the info dict for a given checkpoint"""
stats = os.stat(os_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id=checkpoint_id,
last_modified=last_modified,
)
return info
# Error Handling
def no_such_checkpoint(self, path, checkpoint_id):
raise HTTPError(
404,
u'Checkpoint does not exist: %s@%s' % (path, checkpoint_id)
)
class GenericFileCheckpoints(GenericCheckpointsMixin, FileCheckpoints):
"""
Local filesystem Checkpoints that works with any conforming
ContentsManager.
"""
def create_file_checkpoint(self, content, format, path):
"""Create a checkpoint from the current content of a file."""
path = path.strip('/')
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint for %s", path)
with self.perm_to_403():
self._save_file(os_checkpoint_path, content, format=format)
# return the checkpoint info
return self.checkpoint_model(checkpoint_id, os_checkpoint_path)
def create_notebook_checkpoint(self, nb, path):
"""Create a checkpoint from the current content of a notebook."""
path = path.strip('/')
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
self.log.debug("creating checkpoint for %s", path)
with self.perm_to_403():
self._save_notebook(os_checkpoint_path, nb)
# return the checkpoint info
return self.checkpoint_model(checkpoint_id, os_checkpoint_path)
def get_notebook_checkpoint(self, checkpoint_id, path):
"""Get a checkpoint for a notebook."""
path = path.strip('/')
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_checkpoint_path):
self.no_such_checkpoint(path, checkpoint_id)
return {
'type': 'notebook',
'content': self._read_notebook(
os_checkpoint_path,
as_version=4,
),
}
def get_file_checkpoint(self, checkpoint_id, path):
"""Get a checkpoint for a file."""
path = path.strip('/')
self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
os_checkpoint_path = self.checkpoint_path(checkpoint_id, path)
if not os.path.isfile(os_checkpoint_path):
self.no_such_checkpoint(path, checkpoint_id)
content, format = self._read_file(os_checkpoint_path, format=None)
return {
'type': 'file',
'content': content,
'format': format,
}
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/IPython/html/services/contents/filecheckpoints.py
|
Python
|
mit
| 6,954 | 0 |
import psycopg2
import unittest
import sys
class LDropTablesTest(unittest.TestCase):
def setUp(self):
conn = psycopg2.connect("dbname=teste user=postgres")
conn.set_isolation_level(0) # set autocommit
self.cur = conn.cursor()
def tearDown(self):
self.cur.close()
def testCDropTableFugaRota(self):
self.cur.execute("DROP TABLE FugaRota;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testEDropTableHorario(self):
self.cur.execute("DROP TABLE Horario;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testDDropTableLocalization(self):
self.cur.execute("DROP TABLE Localization CASCADE;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testFDropTableOnibus(self):
self.cur.execute("DROP TABLE Onibus;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testBDropTablePontoOnibusRota(self):
self.cur.execute("DROP TABLE PontoOnibus_Rota;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testGDropTablePontoOnibus(self):
self.cur.execute("DROP TABLE PontoOnibus;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
def testADropTableRota(self):
self.cur.execute("DROP TABLE Rota CASCADE;")
self.assertEqual(self.cur.statusmessage, "DROP TABLE")
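# A minimal sketch (assumption, not present in the original file) of how these tests
# might be run; the A-G prefixes on the method names exploit unittest's default
# alphabetical ordering to control the order in which the tables are dropped.
#
#     if __name__ == '__main__':
#         unittest.main(verbosity=2)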
|
UFCGProjects/sig
|
src/tests/DropTablesTest.py
|
Python
|
mit
| 1,528 | 0.003927 |
from rx.core import ObservableBase
class AnonymousSubject(ObservableBase):
def __init__(self, observer, observable):
super(AnonymousSubject, self).__init__()
self.observer = observer
self.observable = observable
def _subscribe_core(self, observer):
return self.observable.subscribe(observer)
def on_completed(self):
self.observer.on_completed()
def on_error(self, exception):
self.observer.on_error(exception)
def on_next(self, value):
self.observer.on_next(value)
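# A minimal usage sketch (assumption, not part of the original module): the class simply
# forwards observer calls to `observer` and subscriptions to `observable`, so a Subject
# can stand in for both ends.
#
#     from rx.subjects import Subject
#     inner = Subject()
#     proxy = AnonymousSubject(inner, inner)
#     proxy.subscribe(lambda value: print(value))
#     proxy.on_next(42)    # forwarded to the inner Subject, printed by the subscriber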
|
Sprytile/Sprytile
|
rx/subjects/anonymoussubject.py
|
Python
|
mit
| 548 | 0 |
from datetime import datetime
from django import forms
from django.utils.translation import ugettext_lazy as _
from tendenci.apps.base.fields import SplitDateTimeField
from tendenci.apps.payments.models import Payment, PaymentMethod
PAYMENT_METHODS = PaymentMethod.objects.filter().values_list(
'machine_name', 'human_name').exclude()
class MarkAsPaidForm(forms.ModelForm):
payment_method = forms.CharField(
max_length=20,
widget=forms.Select(choices=PAYMENT_METHODS))
submit_dt = SplitDateTimeField(
label=_('Submit Date and Time'),
initial=datetime.now())
class Meta:
model = Payment
fields = (
'amount',
'payment_method',
'submit_dt',
)
def save(self, user, invoice, *args, **kwargs):
"""
Save payment, bind invoice instance.
Set payment fields (e.g. name, description)
"""
instance = super(MarkAsPaidForm, self).save(*args, **kwargs)
instance.method = self.cleaned_data['payment_method']
instance.invoice = invoice
instance.first_name = invoice.bill_to_first_name
instance.last_name = invoice.bill_to_last_name
instance.email = invoice.bill_to_email
instance.status_detail = 'approved'
instance.creator = user
instance.creator_username = user.username
instance.owner = user
instance.owner_username = user.username
instance.save()
invoice_object = invoice.get_object()
if invoice_object:
if hasattr(invoice_object, 'get_payment_description'):
instance.description = invoice_object.get_payment_description(invoice)
if not instance.description:
instance.description = 'Tendenci Invoice {} for {}({})'.format(
instance.pk, invoice_object, invoice_object.pk)
return instance
class PaymentSearchForm(forms.Form):
SEARCH_CRITERIA_CHOICES = (
('', _('SELECT ONE')),
('first_name', _('First Name')),
('last_name', _('Last Name')),
('amount', _('Amount')),
('owner_username', _('Owner Username')),
('id', _('Payment ID')),
('invoice__id', _('Invoice ID')),
('trans_id', _('Transaction ID')),
('auth_code', _('Authorization Code'))
)
SEARCH_METHOD_CHOICES = (
('starts_with', _('Starts With')),
('contains', _('Contains')),
('exact', _('Exact')),
)
search_criteria = forms.ChoiceField(choices=SEARCH_CRITERIA_CHOICES,
required=False)
search_text = forms.CharField(max_length=100, required=False)
search_method = forms.ChoiceField(choices=SEARCH_METHOD_CHOICES,
required=False)
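# A minimal usage sketch (hypothetical view code, not part of this module): `request`
# and `invoice` are assumed to come from the surrounding admin view.
#
#     form = MarkAsPaidForm(request.POST)
#     if form.is_valid():
#         payment = form.save(request.user, invoice)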
|
alirizakeles/tendenci
|
tendenci/apps/payments/forms.py
|
Python
|
gpl-3.0
| 2,831 | 0.000706 |
from __future__ import print_function
__title__ = 'pif.utils'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('ensure_autodiscover', 'list_checkers', 'get_public_ip')
from pif.base import registry
from pif.discover import autodiscover
def ensure_autodiscover():
"""
Ensures the IP checkers are discovered.
"""
if not registry._registry:
autodiscover()
def list_checkers():
"""
Lists available checkers.
:return list:
"""
return registry._registry.keys()
def get_public_ip(preferred_checker=None, verbose=False):
"""
Gets IP using one of the services.
    :param str preferred_checker: Checker UID. If given, the preferred checker is used.
:param bool verbose: If set to True, debug info is printed.
:return str:
"""
ensure_autodiscover()
# If use preferred checker.
if preferred_checker:
ip_checker_cls = registry.get(preferred_checker)
if not ip_checker_cls:
return False
ip_checker = ip_checker_cls(verbose=verbose)
ip = ip_checker.get_public_ip()
if verbose:
print('provider: ', ip_checker_cls)
return ip
# Using all checkers.
for ip_checker_name, ip_checker_cls in registry._registry.items():
ip_checker = ip_checker_cls(verbose=verbose)
try:
ip = ip_checker.get_public_ip()
if ip:
if verbose:
print('provider: ', ip_checker_cls)
return ip
except Exception as e:
if verbose:
print(e)
return False
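# A minimal usage sketch (assumption, not part of the original module):
#
#     from pif.utils import list_checkers, get_public_ip
#     print(list_checkers())            # UIDs of all discovered checkers
#     ip = get_public_ip(verbose=True)  # try checkers until one returns an IP
#     # or force a specific checker by UID (hypothetical name; take a real one
#     # from list_checkers()):
#     ip = get_public_ip(preferred_checker='some-checker-uid')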
|
djabber/Dashboard
|
bottle/dash/local/lib/pif-0.7/src/pif/utils.py
|
Python
|
mit
| 1,686 | 0.002372 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# 1. standard library imports
from numpy import nan
from numpy import isnan
from numpy import ndarray
from collections import OrderedDict
import warnings
# 2. third party imports
from astropy.table import Table, Column
from astropy.io import ascii
from astropy.time import Time
from astropy.utils.exceptions import AstropyDeprecationWarning
# 3. local imports - use relative imports
# commonly required local imports shown below as example
# all Query classes should inherit from BaseQuery.
from ..query import BaseQuery
# async_to_sync generates the relevant query tools from _async methods
from ..utils import async_to_sync
# import configurable items declared in __init__.py
from . import conf
__all__ = ['Horizons', 'HorizonsClass']
@async_to_sync
class HorizonsClass(BaseQuery):
"""
A class for querying the
`JPL Horizons <https://ssd.jpl.nasa.gov/horizons/>`_ service.
"""
TIMEOUT = conf.timeout
def __init__(self, id=None, location=None, epochs=None,
id_type=None):
"""Instantiate JPL query.
Parameters
----------
id : str, required
Name, number, or designation of the object to be queried.
location : str or dict, optional
Observer's location for ephemerides queries or center body name for
orbital element or vector queries. Uses the same codes as JPL
Horizons. If no location is provided, Earth's center is used for
ephemerides queries and the Sun's center for elements and vectors
queries. Arbitrary topocentric coordinates for ephemerides queries
can be provided in the format of a dictionary. The dictionary has to
be of the form {``'lon'``: longitude in deg (East positive, West
negative), ``'lat'``: latitude in deg (North positive, South
negative), ``'elevation'``: elevation in km above the reference
ellipsoid, [``'body'``: Horizons body ID of the central body;
optional; if this value is not provided it is assumed that this
location is on Earth]}.
epochs : scalar, list-like, or dictionary, optional
Either a list of epochs in JD or MJD format or a dictionary defining
a range of times and dates; the range dictionary has to be of the
form {``'start'``: 'YYYY-MM-DD [HH:MM:SS]', ``'stop'``: 'YYYY-MM-DD
[HH:MM:SS]', ``'step'``: 'n[y|d|m|s]'}. Epoch timescales depend on
the type of query performed: UTC for ephemerides queries, TDB for
element and vector queries. If no epochs are provided, the current
time is used.
id_type : str, optional
Controls Horizons's object selection for ``id``
[HORIZONSDOC_SELECTION]_ . Options: ``'designation'`` (small body
designation), ``'name'`` (asteroid or comet name),
``'asteroid_name'``, ``'comet_name'``, ``'smallbody'`` (asteroid
            and comet search), or ``None`` (first search planets,
natural satellites, spacecraft, and special cases, and if no
matches, then search small bodies).
References
----------
.. [HORIZONSDOC_SELECTION] https://ssd.jpl.nasa.gov/horizons/manual.html#select (retrieved 2021 Sep 23).
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> eros = Horizons(id='433', location='568',
... epochs={'start':'2017-01-01',
... 'stop':'2017-02-01',
... 'step':'1d'})
>>> print(eros) # doctest: +SKIP
JPLHorizons instance "433"; location=568, epochs={'start': '2017-01-01', 'step': '1d', 'stop': '2017-02-01'}, id_type=None
"""
super(HorizonsClass, self).__init__()
self.id = id
self.location = location
# check for epochs to be dict or list-like; else: make it a list
if epochs is not None:
if isinstance(epochs, (list, tuple, ndarray)):
pass
elif isinstance(epochs, dict):
if not ('start' in epochs and
'stop' in epochs and
'step' in epochs):
raise ValueError('time range ({:s}) requires start, stop, '
'and step'.format(str(epochs)))
else:
# turn scalars into list
epochs = [epochs]
self.epochs = epochs
# check for id_type
if id_type in ['majorbody', 'id']:
warnings.warn("``id_type``s 'majorbody' and 'id' are deprecated "
"and replaced with ``None``, which has the same "
"functionality.", AstropyDeprecationWarning)
id_type = None
if id_type not in [None, 'smallbody', 'designation', 'name',
'asteroid_name', 'comet_name']:
raise ValueError('id_type ({:s}) not allowed'.format(id_type))
self.id_type = id_type
# return raw response?
self.return_raw = False
self.query_type = None # ['ephemerides', 'elements', 'vectors']
self.uri = None # will contain query URL
self.raw_response = None # will contain raw response from server
def __str__(self):
"""
String representation of HorizonsClass object instance'
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> eros = Horizons(id='433', location='568',
... epochs={'start':'2017-01-01',
... 'stop':'2017-02-01',
... 'step':'1d'})
>>> print(eros) # doctest: +SKIP
JPLHorizons instance "433"; location=568, epochs={'start': '2017-01-01', 'step': '1d', 'stop': '2017-02-01'}, id_type=None
"""
return ('JPLHorizons instance \"{:s}\"; location={:s}, '
'epochs={:s}, id_type={:s}').format(
str(self.id),
str(self.location),
str(self.epochs),
str(self.id_type))
# ---------------------------------- query functions
def ephemerides_async(self, airmass_lessthan=99,
solar_elongation=(0, 180), max_hour_angle=0,
rate_cutoff=None,
skip_daylight=False,
refraction=False,
refsystem='ICRF',
closest_apparition=False, no_fragments=False,
quantities=conf.eph_quantities,
get_query_payload=False,
get_raw_response=False, cache=True,
extra_precision=False):
"""
Query JPL Horizons for ephemerides.
The ``location`` parameter in ``HorizonsClass`` refers in this case to
the location of the observer.
The following tables list the values queried, their definitions, data
types, units, and original Horizons designations (where available). For
more information on the definitions of these quantities, please refer to
the `Horizons User Manual <https://ssd.jpl.nasa.gov/?horizons_doc>`_.
+------------------+-----------------------------------------------+
| Column Name | Definition |
+==================+===============================================+
| targetname | official number, name, designation (string) |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| M1 | comet total abs mag (float, mag, ``M1``) |
+------------------+-----------------------------------------------+
| M2 | comet nuclear abs mag (float, mag, ``M2``) |
+------------------+-----------------------------------------------+
| k1 | total mag scaling factor (float, ``k1``) |
+------------------+-----------------------------------------------+
| k2 | nuclear mag scaling factor (float, ``k2``) |
+------------------+-----------------------------------------------+
| phasecoeff | comet phase coeff (float, mag/deg, ``PHCOFF``)|
+------------------+-----------------------------------------------+
| datetime | epoch (str, ``Date__(UT)__HR:MN:SC.fff``) |
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float, |
| | ``Date_________JDUT``) |
+------------------+-----------------------------------------------+
| solar_presence | information on Sun's presence (str) |
+------------------+-----------------------------------------------+
| flags | information on Moon, target status (str) |
+------------------+-----------------------------------------------+
| RA | target RA (float, deg, ``DEC_(XXX)``) |
+------------------+-----------------------------------------------+
| DEC | target DEC (float, deg, ``DEC_(XXX)``) |
+------------------+-----------------------------------------------+
| RA_app | target apparent RA (float, deg, |
| | ``R.A._(a-app)``) |
+------------------+-----------------------------------------------+
| DEC_app | target apparent DEC (float, deg, |
| | ``DEC_(a-app)``) |
+------------------+-----------------------------------------------+
| RA_rate | target rate RA (float, arcsec/hr, ``RA*cosD``)|
+------------------+-----------------------------------------------+
| DEC_rate | target RA (float, arcsec/hr, ``d(DEC)/dt``) |
+------------------+-----------------------------------------------+
| AZ | Azimuth (float, deg, EoN, ``Azi_(a-app)``) |
+------------------+-----------------------------------------------+
| EL | Elevation (float, deg, ``Elev_(a-app)``) |
+------------------+-----------------------------------------------+
| AZ_rate | Azimuth rate (float, arcsec/minute, |
| | ``dAZ*cosE``) |
+------------------+-----------------------------------------------+
| EL_rate | Elevation rate (float, arcsec/minute, |
| | ``d(ELV)/dt``) |
+------------------+-----------------------------------------------+
| sat_X | satellite X position (arcsec, |
| | ``X_(sat-prim)``) |
+------------------+-----------------------------------------------+
| sat_Y | satellite Y position (arcsec, |
| | ``Y_(sat-prim)``) |
+------------------+-----------------------------------------------+
| sat_PANG | satellite position angle (deg, |
| | ``SatPANG``) |
+------------------+-----------------------------------------------+
| siderealtime | local apparent sidereal time (str, |
| | ``L_Ap_Sid_Time``) |
+------------------+-----------------------------------------------+
| airmass | target optical airmass (float, ``a-mass``) |
+------------------+-----------------------------------------------+
| magextinct | V-mag extinction (float, mag, ``mag_ex``) |
+------------------+-----------------------------------------------+
| V | V magnitude (float, mag, ``APmag``) |
+------------------+-----------------------------------------------+
| Tmag | comet Total magnitude (float, mag, ``T-mag``) |
+------------------+-----------------------------------------------+
        | Nmag             | comet Nucleus magnitude (float, mag,          |
| | ``N-mag``) |
+------------------+-----------------------------------------------+
| surfbright | surf brightness (float, mag/arcsec^2, |
| | ``S-brt``) |
+------------------+-----------------------------------------------+
| illumination | frac of illumination (float, percent, |
| | ``Illu%``) |
+------------------+-----------------------------------------------+
| illum_defect | Defect of illumination (float, arcsec, |
| | ``Dec_illu``) |
+------------------+-----------------------------------------------+
| sat_sep | Target-primary angular separation (float, |
| | arcsec, ``ang-sep``) |
+------------------+-----------------------------------------------+
| sat_vis | Target-primary visibility (str, ``v``) |
+------------------+-----------------------------------------------+
| ang_width | Angular width of target (float, arcsec, |
| | ``Ang-diam``) |
+------------------+-----------------------------------------------+
| PDObsLon | Apparent planetodetic longitude (float, deg, |
| | ``ObsSub-LON``) |
+------------------+-----------------------------------------------+
| PDObsLat | Apparent planetodetic latitude (float, deg, |
| | ``ObsSub-LAT``) |
+------------------+-----------------------------------------------+
| PDSunLon | Subsolar planetodetic longitude (float, deg, |
| | ``SunSub-LON``) |
+------------------+-----------------------------------------------+
| PDSunLat | Subsolar planetodetic latitude (float, deg, |
| | ``SunSub-LAT``) |
+------------------+-----------------------------------------------+
| SubSol_ang | Target sub-solar point position angle |
| | (float, deg, ``SN.ang``) |
+------------------+-----------------------------------------------+
| SubSol_dist | Target sub-solar point position angle distance|
| | (float, arcsec, ``SN.dist``) |
+------------------+-----------------------------------------------+
| NPole_ang | Target's North Pole position angle |
| | (float, deg, ``NP.ang``) |
+------------------+-----------------------------------------------+
| NPole_dist | Target's North Pole position angle distance |
| | (float, arcsec, ``NP.dist``) |
+------------------+-----------------------------------------------+
| EclLon | heliocentr ecl long (float, deg, ``hEcl-Lon``)|
+------------------+-----------------------------------------------+
| EclLat | heliocentr ecl lat (float, deg, ``hEcl-Lat``) |
+------------------+-----------------------------------------------+
| ObsEclLon | obscentr ecl long (float, deg, ``ObsEcLon``) |
+------------------+-----------------------------------------------+
| ObsEclLat | obscentr ecl lat (float, deg, ``ObsEcLat``) |
+------------------+-----------------------------------------------+
| r | heliocentric distance (float, au, ``r``) |
+------------------+-----------------------------------------------+
| r_rate | heliocentric radial rate (float, km/s, |
| | ``rdot``) |
+------------------+-----------------------------------------------+
| delta | distance from observer (float, au, ``delta``) |
+------------------+-----------------------------------------------+
| delta_rate | obs-centric rad rate (float, km/s, ``deldot``)|
+------------------+-----------------------------------------------+
| lighttime | one-way light time (float, min, ``1-way_LT``) |
+------------------+-----------------------------------------------+
| vel_sun | Target center velocity wrt Sun |
| | (float, km/s, ``VmagSn``) |
+------------------+-----------------------------------------------+
| vel_obs | Target center velocity wrt Observer |
| | (float, km/s, ``VmagOb``) |
+------------------+-----------------------------------------------+
| elong | solar elongation (float, deg, ``S-O-T``) |
+------------------+-----------------------------------------------+
| elongFlag | app. position relative to Sun (str, ``/r``) |
+------------------+-----------------------------------------------+
| alpha | solar phase angle (float, deg, ``S-T-O``) |
+------------------+-----------------------------------------------+
| lunar_elong | Apparent lunar elongation angle wrt target |
| | (float, deg, ``T-O-M``) |
+------------------+-----------------------------------------------+
| lunar_illum | Lunar illumination percentage |
| | (float, percent, ``MN_Illu%``) |
+------------------+-----------------------------------------------+
| IB_elong | Apparent interfering body elongation angle |
| | wrt target (float, deg, ``T-O-I``) |
+------------------+-----------------------------------------------+
| IB_illum | Interfering body illumination percentage |
| | (float, percent, ``IB_Illu%``) |
+------------------+-----------------------------------------------+
| sat_alpha | Observer-Primary-Target angle |
| | (float, deg, ``O-P-T``) |
+------------------+-----------------------------------------------+
| OrbPlaneAng | orbital plane angle (float, deg, ``PlAng``) |
+------------------+-----------------------------------------------+
| sunTargetPA | -Sun vector PA (float, deg, EoN, ``PsAng``) |
+------------------+-----------------------------------------------+
| velocityPA | -velocity vector PA (float, deg, EoN, |
| | ``PsAMV``) |
+------------------+-----------------------------------------------+
| constellation | constellation ID containing target (str, |
| | ``Cnst``) |
+------------------+-----------------------------------------------+
| TDB-UT | difference between TDB and UT (float, |
| | seconds, ``TDB-UT``) |
+------------------+-----------------------------------------------+
| NPole_RA | Target's North Pole RA (float, deg, |
| | ``N.Pole-RA``) |
+------------------+-----------------------------------------------+
| NPole_DEC | Target's North Pole DEC (float, deg, |
| | ``N.Pole-DC``) |
+------------------+-----------------------------------------------+
| GlxLon | galactic longitude (float, deg, ``GlxLon``) |
+------------------+-----------------------------------------------+
| GlxLat | galactic latitude (float, deg, ``GlxLat``) |
+------------------+-----------------------------------------------+
| solartime | local apparent solar time (string, |
| | ``L_Ap_SOL_Time``) |
+------------------+-----------------------------------------------+
| earth_lighttime | observer lighttime from center of Earth |
| | (float, minutes, ``399_ins_LT`` |
+------------------+-----------------------------------------------+
| RA_3sigma | 3 sigma positional uncertainty in RA (float, |
| | arcsec, ``RA_3sigma``) |
+------------------+-----------------------------------------------+
| DEC_3sigma | 3 sigma positional uncertainty in DEC (float,|
| | arcsec, ``DEC_3sigma``) |
+------------------+-----------------------------------------------+
| SMAA_3sigma | 3sig pos unc error ellipse semi-major axis |
| | (float, arcsec, ``SMAA_3sig``) |
+------------------+-----------------------------------------------+
| SMIA_3sigma | 3sig pos unc error ellipse semi-minor axis |
| | (float, arcsec, ``SMIA_3sig``) |
+------------------+-----------------------------------------------+
| Theta_3sigma | pos unc error ellipse position angle |
| | (float, deg, ``Theta``) |
+------------------+-----------------------------------------------+
        | Area_3sigma      | 3sig pos unc error ellipse area               |
| | (float, arcsec^2, ``Area_3sig``) |
+------------------+-----------------------------------------------+
| RSS_3sigma | 3sig pos unc error ellipse root-sum-square |
| | (float, arcsec, ``POS_3sigma``) |
+------------------+-----------------------------------------------+
| r_3sigma | 3sig range uncertainty |
| | (float, km, ``RNG_3sigma``) |
+------------------+-----------------------------------------------+
| r_rate_3sigma | 3sig range rate uncertainty |
| | (float, km/second, ``RNGRT_3sigma``) |
+------------------+-----------------------------------------------+
| SBand_3sigma | 3sig Doppler radar uncertainties at S-band |
| | (float, Hertz, ``DOP_S_3sig``) |
+------------------+-----------------------------------------------+
| XBand_3sigma | 3sig Doppler radar uncertainties at X-band |
| | (float, Hertz, ``DOP_X_3sig``) |
+------------------+-----------------------------------------------+
| DoppDelay_3sigma | 3sig Doppler radar round-trip delay |
| | unc (float, second, ``RT_delay_3sig``) |
+------------------+-----------------------------------------------+
| true_anom | True Anomaly (float, deg, ``Tru_Anom``) |
+------------------+-----------------------------------------------+
| hour_angle | local apparent hour angle (string, |
| | sexagesimal angular hours, ``L_Ap_Hour_Ang``) |
+------------------+-----------------------------------------------+
| alpha_true | true phase angle (float, deg, ``phi``) |
+------------------+-----------------------------------------------+
| PABLon | phase angle bisector longitude |
| | (float, deg, ``PAB-LON``) |
+------------------+-----------------------------------------------+
| PABLat | phase angle bisector latitude |
| | (float, deg, ``PAB-LAT``) |
+------------------+-----------------------------------------------+
Parameters
----------
airmass_lessthan : float, optional
Defines a maximum airmass for the query, default: 99
solar_elongation : tuple, optional
Permissible solar elongation range: (minimum, maximum); default:
(0,180)
max_hour_angle : float, optional
Defines a maximum hour angle for the query, default: 0
rate_cutoff : float, optional
Angular range rate upper limit cutoff in arcsec/h; default: disabled
skip_daylight : boolean, optional
Crop daylight epochs in query, default: False
refraction : boolean
If ``True``, coordinates account for a standard atmosphere
refraction model; if ``False``, coordinates do not account for
refraction (airless model); default: ``False``
refsystem : string
Coordinate reference system: ``'ICRF'`` or ``'B1950'``; default:
``'ICRF'``
closest_apparition : boolean, optional
Only applies to comets. This option will choose the closest
apparition available in time to the selected epoch; default: False.
Do not use this option for non-cometary objects.
no_fragments : boolean, optional
Only applies to comets. Reject all comet fragments from selection;
default: False. Do not use this option for
non-cometary objects.
quantities : integer or string, optional
Single integer or comma-separated list in the form of a string
corresponding to all the quantities to be queried from JPL Horizons
using the coding according to the `JPL Horizons User Manual
Definition of Observer Table Quantities
<https://ssd.jpl.nasa.gov/?horizons_doc#table_quantities>`_;
default: all quantities
get_query_payload : boolean, optional
When set to `True` the method returns the HTTP request parameters as
a dict, default: False
get_raw_response : boolean, optional
Return raw data as obtained by JPL Horizons without parsing the data
into a table, default: False
extra_precision : boolean, optional
Enables extra precision in RA and DEC values; default: False
Returns
-------
response : `requests.Response`
The response of the HTTP request.
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> obj = Horizons(id='Ceres', location='568',
... epochs={'start':'2010-01-01',
... 'stop':'2010-03-01',
... 'step':'10d'})
>>> eph = obj.ephemerides() # doctest: +SKIP
>>> print(eph) # doctest: +SKIP
targetname datetime_str datetime_jd ... PABLon PABLat
--- --- d ... deg deg
----------------- ----------------- ----------- ... -------- ------
1 Ceres (A801 AA) 2010-Jan-01 00:00 2455197.5 ... 238.2494 4.5532
1 Ceres (A801 AA) 2010-Jan-11 00:00 2455207.5 ... 241.3339 4.2832
1 Ceres (A801 AA) 2010-Jan-21 00:00 2455217.5 ... 244.3394 4.0089
1 Ceres (A801 AA) 2010-Jan-31 00:00 2455227.5 ... 247.2518 3.7289
1 Ceres (A801 AA) 2010-Feb-10 00:00 2455237.5 ... 250.0576 3.4415
1 Ceres (A801 AA) 2010-Feb-20 00:00 2455247.5 ... 252.7383 3.1451
"""
URL = conf.horizons_server
# check for required information
if self.id is None:
raise ValueError("'id' parameter not set. Query aborted.")
if self.location is None:
self.location = '500@399'
if self.epochs is None:
self.epochs = Time.now().jd
# assemble commandline based on self.id_type
commandline = str(self.id)
if self.id_type in ['designation', 'name',
'asteroid_name', 'comet_name']:
commandline = ({'designation': 'DES=',
'name': 'NAME=',
'asteroid_name': 'ASTNAM=',
'comet_name': 'COMNAM='}[self.id_type] +
commandline)
if self.id_type in ['smallbody', 'asteroid_name',
'comet_name', 'designation']:
commandline += ';'
if isinstance(closest_apparition, bool):
if closest_apparition:
commandline += ' CAP;'
else:
commandline += ' CAP{:s};'.format(closest_apparition)
if no_fragments:
commandline += ' NOFRAG;'
request_payload = OrderedDict([
('format', 'text'),
('EPHEM_TYPE', 'OBSERVER'),
('QUANTITIES', "'"+str(quantities)+"'"),
('COMMAND', '"' + commandline + '"'),
('SOLAR_ELONG', ('"' + str(solar_elongation[0]) + "," +
str(solar_elongation[1]) + '"')),
('LHA_CUTOFF', (str(max_hour_angle))),
('CSV_FORMAT', ('YES')),
('CAL_FORMAT', ('BOTH')),
('ANG_FORMAT', ('DEG')),
('APPARENT', ({False: 'AIRLESS',
True: 'REFRACTED'}[refraction])),
('REF_SYSTEM', refsystem),
('EXTRA_PREC', {True: 'YES', False: 'NO'}[extra_precision])])
if isinstance(self.location, dict):
if ('lon' not in self.location or 'lat' not in self.location or
'elevation' not in self.location):
raise ValueError(("'location' must contain lon, lat, "
"elevation"))
if 'body' not in self.location:
self.location['body'] = '399'
request_payload['CENTER'] = 'coord@{:s}'.format(
str(self.location['body']))
request_payload['COORD_TYPE'] = 'GEODETIC'
request_payload['SITE_COORD'] = "'{:f},{:f},{:f}'".format(
self.location['lon'], self.location['lat'],
self.location['elevation'])
else:
request_payload['CENTER'] = "'" + str(self.location) + "'"
if rate_cutoff is not None:
request_payload['ANG_RATE_CUTOFF'] = (str(rate_cutoff))
# parse self.epochs
if isinstance(self.epochs, (list, tuple, ndarray)):
request_payload['TLIST'] = "\n".join([str(epoch) for epoch in
self.epochs])
elif isinstance(self.epochs, dict):
if ('start' not in self.epochs or 'stop' not in self.epochs or
'step' not in self.epochs):
raise ValueError("'epochs' must contain start, " +
"stop, step")
request_payload['START_TIME'] = (
'"'+self.epochs['start'].replace("'", '')+'"')
request_payload['STOP_TIME'] = (
'"'+self.epochs['stop'].replace("'", '')+'"')
request_payload['STEP_SIZE'] = (
'"'+self.epochs['step'].replace("'", '')+'"')
else:
# treat epochs as scalar
request_payload['TLIST'] = str(self.epochs)
if airmass_lessthan < 99:
request_payload['AIRMASS'] = str(airmass_lessthan)
if skip_daylight:
request_payload['SKIP_DAYLT'] = 'YES'
else:
request_payload['SKIP_DAYLT'] = 'NO'
self.query_type = 'ephemerides'
# return request_payload if desired
if get_query_payload:
return request_payload
# set return_raw flag, if raw response desired
if get_raw_response:
self.return_raw = True
# query and parse
response = self._request('GET', URL, params=request_payload,
timeout=self.TIMEOUT, cache=cache)
self.uri = response.url
# check length of uri
if len(self.uri) >= 2000:
warnings.warn(('The uri used in this query is very long '
'and might have been truncated. The results of '
'the query might be compromised. If you queried '
'a list of epochs, consider querying a range.'))
return response
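    # A minimal usage sketch (assumption, mirroring the docstring example above): with
    # get_query_payload=True the method returns the assembled request parameters instead
    # of issuing the HTTP call, which is handy for inspecting the COMMAND/CENTER strings.
    #
    #     obj = Horizons(id='Ceres', location='568', epochs=2458133.33546)
    #     payload = obj.ephemerides_async(get_query_payload=True)
    #     payload['COMMAND']    # the Horizons COMMAND string built above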
def elements_async(self, get_query_payload=False,
refsystem='ICRF',
refplane='ecliptic',
tp_type='absolute',
closest_apparition=False, no_fragments=False,
get_raw_response=False, cache=True):
"""
Query JPL Horizons for osculating orbital elements.
The ``location`` parameter in ``HorizonsClass`` refers in this case to
the center body relative to which the elements are provided.
The following table lists the values queried, their definitions, data
types, units, and original Horizons designations (where available). For
more information on the definitions of these quantities, please refer to
the `Horizons User Manual <https://ssd.jpl.nasa.gov/?horizons_doc>`_.
+------------------+-----------------------------------------------+
| Column Name | Definition |
+==================+===============================================+
| targetname | official number, name, designation (string) |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| M1 | comet total abs mag (float, mag, ``M1``) |
+------------------+-----------------------------------------------+
| M2 | comet nuclear abs mag (float, mag, ``M2``) |
+------------------+-----------------------------------------------+
| k1 | total mag scaling factor (float, ``k1``) |
+------------------+-----------------------------------------------+
| k2 | nuclear mag scaling factor (float, ``k2``) |
+------------------+-----------------------------------------------+
| phasecoeff | comet phase coeff (float, mag/deg, ``PHCOFF``)|
+------------------+-----------------------------------------------+
| datetime_str | epoch Date (str, ``Calendar Date (TDB)``) |
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float, ``JDTDB``) |
+------------------+-----------------------------------------------+
| e | eccentricity (float, ``EC``) |
+------------------+-----------------------------------------------+
| q | periapsis distance (float, au, ``QR``) |
+------------------+-----------------------------------------------+
| a | semi-major axis (float, au, ``A``) |
+------------------+-----------------------------------------------+
| incl | inclination (float, deg, ``IN``) |
+------------------+-----------------------------------------------+
| Omega | longitude of Asc. Node (float, deg, ``OM``) |
+------------------+-----------------------------------------------+
| w | argument of the perifocus (float, deg, ``W``) |
+------------------+-----------------------------------------------+
| Tp_jd | time of periapsis (float, Julian Date, ``Tp``)|
+------------------+-----------------------------------------------+
| n | mean motion (float, deg/d, ``N``) |
+------------------+-----------------------------------------------+
| M | mean anomaly (float, deg, ``MA``) |
+------------------+-----------------------------------------------+
| nu | true anomaly (float, deg, ``TA``) |
+------------------+-----------------------------------------------+
| period | orbital period (float, (Earth) d, ``PR``) |
+------------------+-----------------------------------------------+
| Q | apoapsis distance (float, au, ``AD``) |
+------------------+-----------------------------------------------+
Parameters
----------
refsystem : string
Element reference system for geometric and astrometric quantities:
``'ICRF'`` or ``'B1950'``; default: ``'ICRF'``
refplane : string
Reference plane for all output quantities: ``'ecliptic'`` (ecliptic
and mean equinox of reference epoch), ``'earth'`` (Earth mean
equator and equinox of reference epoch), or ``'body'`` (body mean
equator and node of date); default: ``'ecliptic'``
tp_type : string
Representation for time-of-perihelion passage: ``'absolute'`` or
``'relative'`` (to epoch); default: ``'absolute'``
closest_apparition : boolean, optional
Only applies to comets. This option will choose the closest
apparition available in time to the selected epoch; default: False.
Do not use this option for non-cometary objects.
no_fragments : boolean, optional
Only applies to comets. Reject all comet fragments from selection;
default: False. Do not use this option for non-cometary objects.
get_query_payload : boolean, optional
When set to ``True`` the method returns the HTTP request parameters
as a dict, default: False
get_raw_response: boolean, optional
Return raw data as obtained by JPL Horizons without parsing the data
into a table, default: False
Returns
-------
response : `requests.Response`
The response of the HTTP request.
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> obj = Horizons(id='433', location='500@10',
... epochs=2458133.33546)
>>> el = obj.elements() # doctest: +SKIP
>>> print(el) # doctest: +SKIP
targetname datetime_jd ... Q P
--- d ... AU d
------------------ ------------- ... ------------- ------------
433 Eros (1898 DQ) 2458133.33546 ... 1.78244263804 642.93873484
"""
URL = conf.horizons_server
# check for required information
if self.id is None:
raise ValueError("'id' parameter not set. Query aborted.")
if self.location is None:
self.location = '500@10'
if self.epochs is None:
self.epochs = Time.now().jd
# assemble commandline based on self.id_type
commandline = str(self.id)
if self.id_type in ['designation', 'name',
'asteroid_name', 'comet_name']:
commandline = ({'designation': 'DES=',
'name': 'NAME=',
'asteroid_name': 'ASTNAM=',
'comet_name': 'COMNAM='}[self.id_type] +
commandline)
if self.id_type in ['smallbody', 'asteroid_name',
'comet_name', 'designation']:
commandline += ';'
if isinstance(closest_apparition, bool):
if closest_apparition:
commandline += ' CAP;'
else:
commandline += ' CAP{:s};'.format(closest_apparition)
if no_fragments:
commandline += ' NOFRAG;'
if isinstance(self.location, dict):
raise ValueError(('cannot use topographic position in orbital'
'elements query'))
# configure request_payload for ephemerides query
request_payload = OrderedDict([
('format', 'text'),
('EPHEM_TYPE', 'ELEMENTS'),
('MAKE_EPHEM', 'YES'),
('OUT_UNITS', 'AU-D'),
('COMMAND', '"' + commandline + '"'),
('CENTER', ("'" + str(self.location) + "'")),
('CSV_FORMAT', 'YES'),
('ELEM_LABELS', 'YES'),
('OBJ_DATA', 'YES'),
('REF_SYSTEM', refsystem),
('REF_PLANE', {'ecliptic': 'ECLIPTIC', 'earth': 'FRAME',
'body': "'BODY EQUATOR'"}[refplane]),
('TP_TYPE', {'absolute': 'ABSOLUTE',
'relative': 'RELATIVE'}[tp_type])])
# parse self.epochs
if isinstance(self.epochs, (list, tuple, ndarray)):
request_payload['TLIST'] = "\n".join([str(epoch) for
epoch in
self.epochs])
elif type(self.epochs) is dict:
if ('start' not in self.epochs or 'stop' not in self.epochs or
'step' not in self.epochs):
raise ValueError("'epochs' must contain start, "
"stop, step")
request_payload['START_TIME'] = (
'"'+self.epochs['start'].replace("'", '')+'"')
request_payload['STOP_TIME'] = (
'"'+self.epochs['stop'].replace("'", '')+'"')
request_payload['STEP_SIZE'] = (
'"'+self.epochs['step'].replace("'", '')+'"')
else:
request_payload['TLIST'] = str(self.epochs)
self.query_type = 'elements'
# return request_payload if desired
if get_query_payload:
return request_payload
# set return_raw flag, if raw response desired
if get_raw_response:
self.return_raw = True
# query and parse
response = self._request('GET', URL, params=request_payload,
timeout=self.TIMEOUT, cache=cache)
self.uri = response.url
# check length of uri
if len(self.uri) >= 2000:
warnings.warn(('The uri used in this query is very long '
'and might have been truncated. The results of '
'the query might be compromised. If you queried '
'a list of epochs, consider querying a range.'))
return response
def vectors_async(self, get_query_payload=False,
closest_apparition=False, no_fragments=False,
get_raw_response=False, cache=True,
refplane='ecliptic', aberrations='geometric',
delta_T=False,):
"""
Query JPL Horizons for state vectors.
The ``location`` parameter in ``HorizonsClass`` refers in this case to
the center body relative to which the vectors are provided.
The following table lists the values queried, their definitions, data
types, units, and original Horizons designations (where available). For
more information on the definitions of these quantities, please refer to
the `Horizons User Manual <https://ssd.jpl.nasa.gov/?horizons_doc>`_.
+------------------+-----------------------------------------------+
| Column Name | Definition |
+==================+===============================================+
| targetname | official number, name, designation (string) |
+------------------+-----------------------------------------------+
| H | absolute magnitude in V band (float, mag) |
+------------------+-----------------------------------------------+
| G | photometric slope parameter (float) |
+------------------+-----------------------------------------------+
| M1 | comet total abs mag (float, mag, ``M1``) |
+------------------+-----------------------------------------------+
| M2 | comet nuclear abs mag (float, mag, ``M2``) |
+------------------+-----------------------------------------------+
| k1 | total mag scaling factor (float, ``k1``) |
+------------------+-----------------------------------------------+
| k2 | nuclear mag scaling factor (float, ``k2``) |
+------------------+-----------------------------------------------+
| phasecoeff | comet phase coeff (float, mag/deg, ``PHCOFF``)|
+------------------+-----------------------------------------------+
| datetime_str | epoch Date (str, ``Calendar Date (TDB)``) |
+------------------+-----------------------------------------------+
| datetime_jd | epoch Julian Date (float, ``JDTDB``) |
+------------------+-----------------------------------------------+
| delta_T | time-varying difference between TDB and UT |
| | (float, ``delta-T``, optional) |
+------------------+-----------------------------------------------+
| x | x-component of position vector |
| | (float, au, ``X``) |
+------------------+-----------------------------------------------+
| y | y-component of position vector |
| | (float, au, ``Y``) |
+------------------+-----------------------------------------------+
| z | z-component of position vector |
| | (float, au, ``Z``) |
+------------------+-----------------------------------------------+
| vx | x-component of velocity vector (float, au/d, |
| | ``VX``) |
+------------------+-----------------------------------------------+
| vy | y-component of velocity vector (float, au/d, |
| | ``VY``) |
+------------------+-----------------------------------------------+
| vz | z-component of velocity vector (float, au/d, |
| | ``VZ``) |
+------------------+-----------------------------------------------+
| lighttime | one-way lighttime (float, d, ``LT``) |
+------------------+-----------------------------------------------+
| range | range from coordinate center (float, au, |
| | ``RG``) |
+------------------+-----------------------------------------------+
| range_rate | range rate (float, au/d, ``RR``) |
+------------------+-----------------------------------------------+
Parameters
----------
closest_apparition : boolean, optional
Only applies to comets. This option will choose the closest
apparition available in time to the selected epoch; default: False.
Do not use this option for non-cometary objects.
no_fragments : boolean, optional
Only applies to comets. Reject all comet fragments from selection;
default: False. Do not use this option for non-cometary objects.
get_query_payload : boolean, optional
When set to `True` the method returns the HTTP request parameters as
a dict, default: False
get_raw_response: boolean, optional
Return raw data as obtained by JPL Horizons without parsing the data
into a table, default: False
refplane : string
Reference plane for all output quantities: ``'ecliptic'`` (ecliptic
and mean equinox of reference epoch), ``'earth'`` (Earth mean
equator and equinox of reference epoch), or ``'body'`` (body mean
equator and node of date); default: ``'ecliptic'``.
See :ref:`Horizons Reference Frames <jpl-horizons-reference-frames>`
in the astroquery documentation for details.
aberrations : string, optional
Aberrations to be accounted for: [``'geometric'``,
``'astrometric'``, ``'apparent'``]. Default: ``'geometric'``
delta_T : boolean, optional
Triggers output of time-varying difference between TDB and UT
time-scales. Default: False
Returns
-------
response : `requests.Response`
The response of the HTTP request.
Examples
--------
>>> from astroquery.jplhorizons import Horizons
>>> obj = Horizons(id='2012 TC4', location='257',
... epochs={'start':'2017-10-01',
... 'stop':'2017-10-02',
... 'step':'10m'})
>>> vec = obj.vectors() # doctest: +SKIP
>>> print(vec) # doctest: +SKIP
targetname datetime_jd ... range range_rate
--- d ... AU AU / d
---------- ------------- ... --------------- -----------------
(2012 TC4) 2458027.5 ... 0.0429332099306 -0.00408018711862
(2012 TC4) 2458027.50694 ... 0.0429048742906 -0.00408040726527
(2012 TC4) 2458027.51389 ... 0.0428765385796 -0.00408020747595
(2012 TC4) 2458027.52083 ... 0.0428482057142 -0.0040795878561
(2012 TC4) 2458027.52778 ... 0.042819878607 -0.00407854931543
(2012 TC4) 2458027.53472 ... 0.0427915601617 -0.0040770935665
... ... ... ... ...
(2012 TC4) 2458028.45833 ... 0.0392489462501 -0.00405496595173
(2012 TC4) 2458028.46528 ... 0.03922077771 -0.00405750632914
(2012 TC4) 2458028.47222 ... 0.039192592935 -0.00405964084539
(2012 TC4) 2458028.47917 ... 0.039164394759 -0.00406136516755
(2012 TC4) 2458028.48611 ... 0.0391361860433 -0.00406267574646
(2012 TC4) 2458028.49306 ... 0.0391079696711 -0.0040635698239
(2012 TC4) 2458028.5 ... 0.0390797485422 -0.00406404543822
Length = 145 rows
"""
URL = conf.horizons_server
# check for required information
if self.id is None:
raise ValueError("'id' parameter not set. Query aborted.")
if self.location is None:
self.location = '500@10'
if self.epochs is None:
self.epochs = Time.now().jd
# assemble commandline based on self.id_type
commandline = str(self.id)
if self.id_type in ['designation', 'name',
'asteroid_name', 'comet_name']:
commandline = ({'designation': 'DES=',
'name': 'NAME=',
'asteroid_name': 'ASTNAM=',
'comet_name': 'COMNAM='}[self.id_type] +
commandline)
if self.id_type in ['smallbody', 'asteroid_name',
'comet_name', 'designation']:
commandline += ';'
if isinstance(closest_apparition, bool):
if closest_apparition:
commandline += ' CAP;'
else:
commandline += ' CAP{:s};'.format(closest_apparition)
if no_fragments:
commandline += ' NOFRAG;'
if isinstance(self.location, dict):
            raise ValueError(('cannot use topographic position in state '
                              'vectors query'))
# configure request_payload for ephemerides query
request_payload = OrderedDict([
('format', 'text'),
('EPHEM_TYPE', 'VECTORS'),
('OUT_UNITS', 'AU-D'),
('COMMAND', '"' + commandline + '"'),
('CENTER', ("'" + str(self.location) + "'")),
('CSV_FORMAT', ('"YES"')),
('REF_PLANE', {'ecliptic': 'ECLIPTIC',
'earth': 'FRAME',
'frame': 'FRAME',
'body': "'BODY EQUATOR'"}[refplane]),
('REF_SYSTEM', 'ICRF'),
('TP_TYPE', 'ABSOLUTE'),
('VEC_LABELS', 'YES'),
('VEC_CORR', {'geometric': '"NONE"',
'astrometric': '"LT"',
'apparent': '"LT+S"'}[aberrations]),
('VEC_DELTA_T', {True: 'YES', False: 'NO'}[delta_T]),
('OBJ_DATA', 'YES')]
)
# parse self.epochs
if isinstance(self.epochs, (list, tuple, ndarray)):
request_payload['TLIST'] = "\n".join([str(epoch) for epoch in
self.epochs])
elif type(self.epochs) is dict:
if ('start' not in self.epochs or 'stop' not in self.epochs or
'step' not in self.epochs):
raise ValueError("'epochs' must contain start, " +
"stop, step")
request_payload['START_TIME'] = (
'"'+self.epochs['start'].replace("'", '')+'"')
request_payload['STOP_TIME'] = (
'"'+self.epochs['stop'].replace("'", '')+'"')
request_payload['STEP_SIZE'] = (
'"'+self.epochs['step'].replace("'", '')+'"')
else:
# treat epochs as a list
request_payload['TLIST'] = str(self.epochs)
self.query_type = 'vectors'
# return request_payload if desired
if get_query_payload:
return request_payload
# set return_raw flag, if raw response desired
if get_raw_response:
self.return_raw = True
# query and parse
response = self._request('GET', URL, params=request_payload,
timeout=self.TIMEOUT, cache=cache)
self.uri = response.url
# check length of uri
if len(self.uri) >= 2000:
warnings.warn(('The uri used in this query is very long '
'and might have been truncated. The results of '
'the query might be compromised. If you queried '
'a list of epochs, consider querying a range.'))
return response
# ---------------------------------- parser functions
def _parse_horizons(self, src):
"""
Routine for parsing data from JPL Horizons
Parameters
----------
self : HorizonsClass instance
src : list
raw response from server
Returns
-------
data : `astropy.Table`
"""
self.raw_response = src
# return raw response, if desired
if self.return_raw:
# reset return_raw flag
self.return_raw = False
return self.raw_response
# split response by line break
src = src.split('\n')
data_start_idx = 0
data_end_idx = 0
H, G = nan, nan
M1, M2, k1, k2, phcof = nan, nan, nan, nan, nan
headerline = []
for idx, line in enumerate(src):
# read in ephemerides header line; replace some field names
if (self.query_type == 'ephemerides' and
"Date__(UT)__HR:MN" in line):
headerline = str(line).split(',')
headerline[2] = 'solar_presence'
headerline[3] = 'flags'
headerline[-1] = '_dump'
# read in elements header line
elif (self.query_type == 'elements' and
"JDTDB," in line):
headerline = str(line).split(',')
headerline[-1] = '_dump'
# read in vectors header line
elif (self.query_type == 'vectors' and
"JDTDB," in line):
headerline = str(line).split(',')
headerline[-1] = '_dump'
# identify end of data block
if "$$EOE" in line:
data_end_idx = idx
# identify start of data block
if "$$SOE" in line:
data_start_idx = idx + 1
# read in targetname
if "Target body name" in line:
targetname = line[18:50].strip()
# read in H and G (if available)
if "rotational period in hours)" in line:
HGline = src[idx + 2].split('=')
if 'B-V' in HGline[2] and 'G' in HGline[1]:
try:
H = float(HGline[1].rstrip('G'))
G = float(HGline[2].rstrip('B-V'))
except ValueError:
H = nan
G = nan
# read in M1, M2, k1, k2, and phcof (if available)
if "Comet physical" in line:
HGline = src[idx + 2].split('=')
try:
M1 = float(HGline[1].rstrip('M2'))
k1 = float(HGline[3].rstrip('k2'))
except ValueError:
M1 = nan
k1 = nan
try:
M2 = float(HGline[2].rstrip('k1'))
k2 = float(HGline[4].rstrip('PHCOF'))
except ValueError:
M2 = nan
k2 = nan
try:
phcof = float(HGline[5])
except ValueError:
phcof = nan
            # catch ambiguous target names
if (("Multiple major-bodies match string" in line or
"Matching small-bodies:" in line) and
("No matches found" not in src[idx + 1])):
for i in range(idx + 2, len(src), 1):
if (('To SELECT, enter record' in src[i]) or
('make unique selection.' in src[i])):
end_idx = i
break
raise ValueError(('Ambiguous target name; provide '
'unique id:\n%s' %
'\n'.join(src[idx + 2:end_idx])))
# catch unknown target
if ("Matching small-bodies" in line and
"No matches found" in src[idx + 1]):
raise ValueError(('Unknown target ({:s}). Maybe try '
'different id_type?').format(self.id))
# catch any unavailability of ephemeris data
if "No ephemeris for target" in line:
errormsg = line[line.find('No ephemeris for target'):]
errormsg = errormsg[:errormsg.find('\n')]
raise ValueError('Horizons Error: {:s}'.format(errormsg))
# catch elements errors
if "Cannot output elements" in line:
errormsg = line[line.find('Cannot output elements'):]
errormsg = errormsg[:errormsg.find('\n')]
raise ValueError('Horizons Error: {:s}'.format(errormsg))
# catch date error
if "Cannot interpret date" in line:
errormsg = line[line.find('Cannot interpret date'):]
errormsg = errormsg[:errormsg.find('\n')]
raise ValueError('Horizons Error: {:s}'.format(errormsg))
if 'INPUT ERROR' in line:
headerline = []
break
if headerline == []:
err_msg = "".join(src[data_start_idx:data_end_idx])
if len(err_msg) > 0:
raise ValueError('Query failed with error message:\n' +
err_msg)
else:
raise ValueError(('Query failed without known error message; '
'received the following response:\n'
'{}').format(self.raw_response))
# strip whitespaces from column labels
headerline = [h.strip() for h in headerline]
# remove all 'Cut-off' messages
raw_data = [line for line in src[data_start_idx:data_end_idx]
if 'Cut-off' not in line]
# read in data
data = ascii.read(raw_data,
names=headerline,
fill_values=[('.n.a.', '0'),
('n.a.', '0')],
fast_reader=False)
# force to a masked table
data = Table(data, masked=True)
# convert data to QTable
# from astropy.table import QTable
# data = QTable(data)
# does currently not work, unit assignment in columns creates error
# results in:
# TypeError: The value must be a valid Python or Numpy numeric type.
# remove last column as it is empty
data.remove_column('_dump')
# add targetname and physical properties as columns
data.add_column(Column([targetname] * len(data),
name='targetname'), index=0)
if not isnan(H):
data.add_column(Column([H] * len(data),
name='H'), index=3)
if not isnan(G):
data.add_column(Column([G] * len(data),
name='G'), index=4)
if not isnan(M1):
data.add_column(Column([M1] * len(data),
name='M1'), index=3)
if not isnan(M2):
data.add_column(Column([M2] * len(data),
name='M2'), index=4)
if not isnan(k1):
data.add_column(Column([k1] * len(data),
name='k1'), index=5)
if not isnan(k2):
data.add_column(Column([k2] * len(data),
name='k2'), index=6)
if not isnan(phcof):
data.add_column(Column([phcof] * len(data),
name='phasecoeff'), index=7)
# replace missing airmass values with 999 (not observable)
if self.query_type == 'ephemerides' and 'a-mass' in data.colnames:
data['a-mass'] = data['a-mass'].filled(999)
# set column definition dictionary
if self.query_type == 'ephemerides':
column_defs = conf.eph_columns
elif self.query_type == 'elements':
column_defs = conf.elem_columns
elif self.query_type == 'vectors':
column_defs = conf.vec_columns
else:
raise TypeError('Query type unknown.')
# set column units
rename = []
for col in data.columns:
data[col].unit = column_defs[col][1]
if data[col].name != column_defs[col][0]:
rename.append(data[col].name)
# rename columns
for col in rename:
try:
data.rename_column(data[col].name, column_defs[col][0])
except KeyError:
pass
return data
def _parse_result(self, response, verbose=None):
"""
Routine for managing parser calls;
This routine decides based on `self.query_type` which parser
has to be used.
Parameters
----------
self : Horizonsclass instance
response : string
raw response from server
Returns
-------
data : `astropy.Table`
"""
self.last_response = response
if self.query_type not in ['ephemerides', 'elements', 'vectors']:
return None
else:
try:
data = self._parse_horizons(response.text)
except Exception as ex:
try:
self._last_query.remove_cache_file(self.cache_location)
except OSError:
# this is allowed: if `cache` was set to False, this
# won't be needed
pass
raise
return data
# the default tool for users to interact with is an instance of the Class
Horizons = HorizonsClass()
|
ceb8/astroquery
|
astroquery/jplhorizons/core.py
|
Python
|
bsd-3-clause
| 67,058 | 0.000224 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('aldryn_newsblog', '0004_auto_20150622_1606'),
]
operations = [
migrations.AlterField(
model_name='newsblogconfig',
name='template_prefix',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Prefix for template dirs', choices=[(b'dummy', b'dummy')]),
preserve_default=True,
),
]
|
Venturi/cms
|
env/lib/python2.7/site-packages/aldryn_newsblog/migrations/0005_auto_20150807_0207.py
|
Python
|
gpl-2.0
| 558 | 0.001792 |
#!/usr/bin/env python
# Aid tools to quality checker.
# Qchecklib
# Eliane Araujo, 2016
import os
import sys
import commands
import json
try:
from cc import measure_complexity
except ImportError:
print("tst quality checker needs cc.py to work.")
sys.exit(1)
try:
sys.path.append('/usr/local/bin/radon/')
from radon.raw import *
from radon.complexity import *
from radon.metrics import *
except ImportError:
print("tst quality checker needs radon to work.")
sys.exit(1)
try:
import urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
url = 'http://qchecklog.appspot.com/api/action/'
def four_metrics(program_name):
return "%s %s %s %s" % ( lloc(program_name), cc(program_name), vhalstead(program_name), pep8(program_name)["count"])
def pep8count(program):
return int(pep8(program)[0])
def pep8(program):
result = []
cmd = 'pycodestyle.py --select=E --count ' + program
try:
pep_errors = commands.getoutput(cmd)
except ImportError:
print("tst quality checker needs pycodestyle.py to work.")
sys.exit(1)
if pep_errors:
for error in pep_errors.splitlines():
if error.isdigit():
result.insert(0, int(error))
break
#remove filename from message.
#Example:
#reference.py:15:16: E225 missing whitespace around operator
result.append( error[error.find(":") + 1:] )
else:
result = [0]
return result
def header_lines(filename):
# Count header's lines
# Consider "coding" and "env" as header
program = open(filename, 'r')
code = program.read()
counter = 0
codelines = code.split("\n")
while codelines[counter].startswith("#"):
counter += 1
program.close()
return counter
def vhalstead(filename):
return halstead_metrics("vol", filename)
def halstead_metrics(options, filename):
#It may be used another options
program = open(filename, 'r')
code = program.read()
if options == 'vol':
h = h_visit(code).volume
else:
h = h_visit(code)
program.close()
return round(h, 2)
def cc(filename):
# Radon complexity method only applies to programs containing classes or functions.
# Using another API to other cases.
program = open(filename, 'r')
code = program.read()
try:
# Use radon
visitor = cc_visit(code)
if len(visitor) <= 0:
# Doesn't have functions or classes.
# Use cc.py
stats = measure_complexity(code)
cc = stats.complexity
else:
cc = 0
for i in range( len(visitor) ):
cc += visitor[i].complexity
except Exception as e:
# Failed
print("qcheck: unable to get cc")
cc = 0
program.close()
return cc
def lloc(filename):
program = open(filename, 'r')
code = program.read()
lines = raw_metrics(code)[1]
program.close()
return lines
def raw_metrics(code):
return analyze(code)
def save(message):
type_ = 'accept'
urlrequest.urlopen(url + type_, data=message)
if __name__ == '__main__':
print("qchecklib is a helper module for tst_qcheck commands")
|
elianearaujo/tst-qcheck
|
bin/qchecklib.py
|
Python
|
agpl-3.0
| 3,348 | 0.011947 |
import numpy as np
from numpy import all
from numpy.testing import assert_almost_equal
from nose.tools import ok_
from ..blas import *
def ndrange(shape, dtype=np.double, order='C'):
return np.arange(np.prod(shape), dtype=dtype).reshape(shape).copy(order)
def assert_dgemm(dgemm_func, A_order, B_order, C_order):
def test(m, n, k):
A = ndrange((m, k), order=A_order)
B = ndrange((k, n), order=B_order)
C = np.zeros((m, n), order=C_order)
dgemm_func(A, B, C)
assert_almost_equal(C, np.dot(A, B))
test(2, 3, 4)
test(0, 3, 4)
test(2, 0, 4)
test(2, 3, 0)
test(0, 0, 2)
test(0, 2, 0)
test(0, 0, 2)
test(0, 0, 0)
def test_dgemm():
yield assert_dgemm, dgemm_crc, 'F', 'C', 'F'
yield assert_dgemm, dgemm_ccc, 'F', 'F', 'F'
|
wavemoth/wavemoth
|
wavemoth/test/test_blas.py
|
Python
|
gpl-2.0
| 812 | 0.004926 |
"""
WSGI config for cfg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cfg.settings")
try:
if os.environ['ENV'] == 'production':
from dj_static import Cling
application = Cling(get_wsgi_application())
except Exception as e:
application = get_wsgi_application()
|
vyscond/my-college-api
|
cfg/wsgi.py
|
Python
|
mit
| 546 | 0 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
import urllib3
import urllib
import xml.etree.ElementTree as etree
from superdesk.io.iptc import subject_codes
from datetime import datetime
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, ITEM_STATE, CONTENT_STATE
from superdesk.utc import utc
from superdesk.io.commands.update_ingest import process_iptc_codes
from superdesk.etree import get_text_word_count
# The older content does not contain an anpa category, so we derive it from the
# publication name
pubnames = {
'International Sport': 'S',
'Racing': 'R',
'Parliamentary Press Releases': 'P',
'Features': 'C',
'Financial News': 'F',
'General': 'A',
'aap Features': 'C',
'aap International News': 'I',
'aap Australian Sport': 'S',
'Australian General News': 'A',
'Asia Pulse Full': 'I',
'AFR Summary': 'A',
'Australian Sport': 'T',
'PR Releases': 'J',
'Entertainment News': 'E',
'Special Events': 'Y',
'Asia Pulse': 'I',
'aap International Sport': 'S',
'Emergency Services': 'A',
'BRW Summary': 'A',
'FBM Summary': 'A',
'aap Australian General News': 'A',
'International News': 'I',
'aap Financial News': 'F',
'Asia Pulse Basic': 'I',
'Political News': 'P',
'Advisories': 'V'
}
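# Illustrative example of the fallback described above (added comment): an
# archived item whose head carries no 'Category' but has
# PublicationName == 'Racing' is assigned anpa category 'R' via this mapping;
# an unknown publication name leaves the category unset.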
class AppImportTextArchiveCommand(superdesk.Command):
option_list = (
superdesk.Option('--start', '-strt', dest='start_id', required=False),
superdesk.Option('--user', '-usr', dest='user', required=True),
superdesk.Option('--password', '-pwd', dest='password', required=True),
superdesk.Option('--url_root', '-url', dest='url', required=True),
superdesk.Option('--query', '-qry', dest='query', required=True),
superdesk.Option('--count', '-c', dest='limit', required=False)
)
def run(self, start_id, user, password, url, query, limit):
print('Starting text archive import at {}'.format(start_id))
self._user = user
self._password = password
self._id = int(start_id)
self._url_root = url
self._query = urllib.parse.quote(query)
if limit is not None:
self._limit = int(limit)
else:
self._limit = None
self._api_login()
x = self._get_bunch(self._id)
while x:
self._process_bunch(x)
x = self._get_bunch(self._id)
if self._limit is not None and self._limit <= 0:
break
print('finished text archive import')
def _api_login(self):
self._http = urllib3.PoolManager()
credentials = '?login[username]={}&login[password]={}'.format(self._user, self._password)
url = self._url_root + credentials
r = self._http.urlopen('GET', url, headers={'Content-Type': 'application/xml'})
self._headers = {'cookie': r.getheader('set-cookie')}
self._anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
def _get_bunch(self, id):
url = self._url_root + \
'archives/txtarch?search_docs[struct_query]=(DCDATA_ID<{0})&search_docs[query]='.format(id)
url += self._query
url += '&search_docs[format]=full&search_docs[pagesize]=500&search_docs[page]=1'
url += '&search_docs[sortorder]=DCDATA_ID%20DESC'
print(url)
retries = 3
while retries > 0:
r = self._http.request('GET', url, headers=self._headers)
if r.status == 200:
e = etree.fromstring(r.data)
# print(str(r.data))
count = int(e.find('doc_count').text)
                if count > 0:
                    print('count : {}'.format(count))
                    return e
                # no documents left to import; stop instead of retrying forever
                return None
else:
self._api_login()
retries -= 1
return None
def _get_head_value(self, doc, field):
el = doc.find('dcdossier/document/head/' + field)
if el is not None:
return el.text
return None
def _addkeywords(self, key, doc, item):
code = self._get_head_value(doc, key)
if code:
if 'keywords' not in item:
item['keywords'] = []
item['keywords'].append(code)
def _process_bunch(self, x):
# x.findall('dc_rest_docs/dc_rest_doc')[0].get('href')
for doc in x.findall('dc_rest_docs/dc_rest_doc'):
print(doc.get('href'))
id = doc.find('dcdossier').get('id')
if int(id) < self._id:
self._id = int(id)
item = {}
item['guid'] = doc.find('dcdossier').get('guid')
# if the item has been modified in the archive then it is due to a kill
# there is an argument that this item should not be imported at all
if doc.find('dcdossier').get('created') != doc.find('dcdossier').get('modified'):
item[ITEM_STATE] = CONTENT_STATE.KILLED
else:
item[ITEM_STATE] = CONTENT_STATE.PUBLISHED
value = datetime.strptime(self._get_head_value(doc, 'PublicationDate'), '%Y%m%d%H%M%S')
item['firstcreated'] = utc.normalize(value) if value.tzinfo else value
item['versioncreated'] = item['firstcreated']
item['unique_id'] = doc.find('dcdossier').get('unique')
item['ingest_id'] = id
item['source'] = self._get_head_value(doc, 'Agency')
self._addkeywords('AsiaPulseCodes', doc, item)
byline = self._get_head_value(doc, 'Byline')
if byline:
item['byline'] = byline
# item['service'] = self._get_head_value(doc,'Service')
category = self._get_head_value(doc, 'Category')
if not category:
publication_name = self._get_head_value(doc, 'PublicationName')
if publication_name in pubnames:
category = pubnames[publication_name]
if category:
anpacategory = {}
anpacategory['qcode'] = category
for anpa_category in self._anpa_categories['items']:
if anpacategory['qcode'].lower() == anpa_category['qcode'].lower():
anpacategory = {'qcode': anpacategory['qcode'], 'name': anpa_category['name']}
break
item['anpa_category'] = [anpacategory]
self._addkeywords('CompanyCodes', doc, item)
type = self._get_head_value(doc, 'Format')
if type == 'x':
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
elif type == 't':
item[ITEM_TYPE] = CONTENT_TYPE.PREFORMATTED
else:
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
item['keyword'] = self._get_head_value(doc, 'Keyword')
item['ingest_provider_sequence'] = self._get_head_value(doc, 'Sequence')
            original_source = self._get_head_value(doc, 'Author')
            if original_source:
                item['original_source'] = original_source
item['headline'] = self._get_head_value(doc, 'Headline')
code = self._get_head_value(doc, 'SubjectRefNum')
if code and len(code) == 7:
code = '0' + code
if code and code in subject_codes:
item['subject'] = []
item['subject'].append({'qcode': code, 'name': subject_codes[code]})
try:
process_iptc_codes(item, None)
except:
pass
slug = self._get_head_value(doc, 'SLUG')
if slug:
item['slugline'] = slug
else:
item['slugline'] = self._get_head_value(doc, 'Keyword')
# self._addkeywords('Takekey', doc, item)
take_key = self._get_head_value(doc, 'Takekey')
if take_key:
item['anpa_take_key'] = take_key
self._addkeywords('Topic', doc, item)
self._addkeywords('Selectors', doc, item)
el = doc.find('dcdossier/document/body/BodyText')
if el is not None:
story = el.text
if item[ITEM_TYPE] == CONTENT_TYPE.TEXT:
story = story.replace('\n ', '<br><br>')
story = story.replace('\n', '<br>')
item['body_html'] = story
else:
item['body_html'] = story
try:
item['word_count'] = get_text_word_count(item['body_html'])
except:
pass
item['pubstatus'] = 'usable'
item['allow_post_publish_actions'] = False
res = superdesk.get_resource_service('published')
original = res.find_one(req=None, guid=item['guid'])
if not original:
res.post([item])
else:
res.patch(original['_id'], item)
if self._limit:
self._limit -= 1
# print(item)
superdesk.command('app:import_text_archive', AppImportTextArchiveCommand())
|
amagdas/superdesk
|
server/apps/aap/import_text_archive/commands.py
|
Python
|
agpl-3.0
| 9,503 | 0.001789 |
import re
import requests
import logging
logger = logging.getLogger(__name__)
class JsUnPacker(object):
"""
It takes the javascript file's url which contains the port numbers for
the encrypted strings. The file has to be unpacked to a readable form just like
http://matthewfl.com/unPacker.html does. Then we create a dictionary for
every key:port pair.
"""
# TODO: it might not be necessary to unpack the js code
def __init__(self, js_file_url):
logger.info("JS UnPacker init path: {}".format(js_file_url))
r = requests.get(js_file_url)
encrypted = r.text.strip()
encrypted = '(' + encrypted.split('}(')[1][:-1]
unpacked = eval('self.unpack' +encrypted) # string of the js code in unpacked form
matches = re.findall(r".*?\('\.([a-zA-Z0-9]{1,6})'\).*?\((\d+)\)", unpacked)
self.ports = dict((key, port) for key, port in matches)
logger.debug('portmap: '+str(self.ports))
def baseN(self, num, b, numerals="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"):
return ((num == 0) and numerals[0]) or (self.baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def unpack(self, p, a, c, k, e=None, d=None):
while c:
c -= 1
if k[c]:
p = re.sub("\\b" + self.baseN(c, a) + "\\b", k[c], p)
return p
def get_port(self, key):
return self.ports[key]
def get_ports(self):
return self.ports
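
if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original
    # module). The URL and class name below are placeholders, not real
    # endpoints: the parser fetches the packed JS, unpacks it and exposes the
    # {css_class: port} mapping used to decode obfuscated proxy ports.
    unpacker = JsUnPacker("http://example.com/packed_ports.js")  # hypothetical URL
    print(unpacker.get_ports())        # full mapping of class names to ports
    # unpacker.get_port("ab12")        # look up one (hypothetical) class name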
|
pgaref/HTTP_Request_Randomizer
|
http_request_randomizer/requests/parsers/js/UnPacker.py
|
Python
|
mit
| 1,507 | 0.004645 |
#!/usr/bin/python3
import json
import numpy as np
import random
class Payload():
'''
Instance contains altitude data for a balloon flight. Use alt() method
to get interpolated data.
'''
addr = None
name = "Payload"
time_index = 0.0 # index into the flight profile data, in seconds.
timestep = 1.0 # gets larger as weight decreases.
last_request_time = 0
last_alt = 0
def __init__(self, filename, mass, ballast, name):
self.parse_profile(filename)
self.initial_mass = mass # kg
self.mass = self.initial_mass # kg
# ballast is included in total mass.
self.initial_ballast = ballast # liters
self.name = name
def parse_profile(self, filename):
'''
Parse a JSON file containing an ascent profile.
'''
with open(filename, 'r') as data_file:
profile = json.load(data_file)
# Create an array of int32's. Alt is in decimeters.
self.alts = (np.array(profile['data'])*10).astype(np.int32)
self.ref_timestep = profile['timestep'] # s
self.ref_mass = profile['mass'] # s
self.times = np.arange(0, self.alts.size*self.ref_timestep, self.ref_timestep)
def alt(self):
'''
        Return the altitude at the payload's current time index (seconds
        since the beginning of the flight), linearly interpolated from the
        ascent profile data.
'''
# alt = None if seconds is outside of the flight time.
if (self.time_index > self.alts.size):
alt = None
print("time index > alt size")
print(self.time_index)
elif (self.time_index < 0):
alt = None
print("time index < 0")
print(self.time_index)
# otherwise, linearly interpolate between the two closest values.
else:
alt = np.empty
alt = np.interp(self.time_index, self.times, self.alts)
return alt
# Did curve-fitting on HabHub data to come up with timestep adjustment.
def adjust_time(self, time_elapsed):
time_delta = time_elapsed - self.last_request_time
self.last_request_time = time_elapsed
x = self.ref_mass / self.mass
self.timestep = 1.0/(-0.0815243*x*x*x + 0.1355*x*x - 0.391461*x + 1.33748611)
self.time_index += time_delta*self.timestep
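    # Worked example of the fit above (added comment): with mass == ref_mass,
    # x = 1 and the polynomial sums to ~1.0, so the profile plays back in real
    # time; once ballast drops and mass falls (e.g. x = 1.2) the denominator
    # shrinks to ~0.92 and timestep grows to ~1.08, advancing the ascent
    # profile faster, as a lighter balloon climbs faster.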
def drop_mass(self, ballast_time_ms):
        # Experimental results show a 4.925 ml/s drain rate with the current
        # setup. We add a random +-10% error because the payload is getting
        # blasted by high winds and the ballast is sloshing around.
noise = random.uniform(0.9, 1.1)
new_mass = self.mass - (noise*ballast_time_ms*0.004925/1000)*0.8
if (new_mass > self.initial_mass - self.initial_ballast):
self.mass = new_mass
else:
self.mass = self.initial_mass - self.initial_ballast
if __name__ == '__main__':
# initialize Flights. 'PAYLOAD_X_ID' is the digimesh ID of payload X.
    # Payload requires (filename, mass, ballast, name); the ballast and name
    # values below are illustrative placeholders.
    fp1 = Payload('profiles/umhab52.json', 1.2, 0.4, 'FP1')
    fp2 = Payload('profiles/umhab48.json', 1.4, 0.4, 'FP2')
'''
xbee = XBee.init('/dev/xbee', 1200) # initialize serial for XBee, 1200baud
ft = 0 # flight time starts at 0
cur_payload = None
while(True):
# Wait for payloads to request an altitude, send it, and update the
# payload’s mass. Add noise into the system for realistic simulation.
req = alt_request_wait();
if ((req.addr != fp1.addr) and (req.addr != fp2.addr)):
if (fp1.addr == None):
fp1.addr = req.addr
cur_payload = fp1
else if (fp2.addr == None):
fp2.addr = req.addr
cur_payload = fp2
else:
print('Got another XBee\'s frame. Maybe change the network id.')
elif (req.addr == fp1.addr):
print('got a fp1 alt request')
cur_payload = fp1
else:
print('got a fp2 alt request')
cur_payload = fp2
XBee.sendAlt(cur_payload.addr, cur_payload.alt())
fp1.mass -= XBee.getBallastDropped(fp1.id)*mass_noise()
ft += timestep
print(fp.timestep)
print(fp.alts)
print(fp.alts.size)
print(fp.times.size)
print(fp.alt(24))
'''
|
liberza/bacon
|
simulator/payload.py
|
Python
|
mit
| 4,318 | 0.003475 |
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.svm import LinearSVC
iris = load_iris()
X, y = iris.data, iris.target
print(cross_val_score(LinearSVC(), X, y, cv=KFold(len(X), 3)))
print(cross_val_score(LinearSVC(), X, y, cv=StratifiedKFold(y, 3)))
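# Illustrative note (added comment): the iris targets are ordered by class, so
# plain 3-fold KFold tests each fold on a class never seen in training (scores
# near 0), while StratifiedKFold keeps the class proportions in every fold
# (scores near 1).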
|
amueller/strata_singapore_2015
|
solutions/cross_validation_iris.py
|
Python
|
cc0-1.0
| 280 | 0 |
# Copyright 2014 Treode, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, time
from functools import total_ordering
from tx_clock import *
@total_ordering
class TxClock(object):
__min_micro_seconds = long(0)
__max_micro_seconds = long(2**63 - 1)
# Bounds on TxClock values
@staticmethod
def min_value():
return TxClock(micro_seconds=TxClock.__min_micro_seconds)
# In Python, longs are actually unbounded, but we'll give TxClock a max
@staticmethod
def max_value():
return TxClock(micro_seconds=TxClock.__max_micro_seconds)
@staticmethod
def now():
current_time_micro_seconds = long(time.time())*10**6
return TxClock(micro_seconds=current_time_micro_seconds)
# Input time in micro-seconds!
def __init__(self, micro_seconds=None):
if (micro_seconds == None):
raise ValueError("Please input time in micro-seconds!")
elif (micro_seconds > TxClock.__max_micro_seconds):
print "micro_seconds: ", micro_seconds
print "max: ", TxClock.__max_micro_seconds
raise ValueError("micro_seconds arg > max micro_seconds value")
# Assume user input time in micro-seconds
else:
self.time = long(micro_seconds)
def to_seconds(self):
# Times are ints, not floats
return self.time / (10**6)
def __repr__(self):
return "TxClock(%s)" % str(self.time)
# Make TxClock instances comparable
def __eq__(self, other):
if (type(other) == TxClock):
return self.time == other.time
else:
return False
def __gt__(self, other):
if (type(other) == TxClock):
return self.time > other.time and not self.__eq__(other)
elif (other == None):
return True
else:
return False
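
# Hedged usage sketch (added for illustration; not part of the original module).
# TxClock wraps a microsecond timestamp and orders instances by that value.
if __name__ == '__main__':
    now = TxClock.now()
    later = TxClock(micro_seconds=now.time + 5 * 10**6)  # five seconds later
    print now, later, later > now       # TxClock(...) TxClock(...) True
    print TxClock.max_value().to_seconds()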
|
Treode/store
|
client/python/tx_clock.py
|
Python
|
apache-2.0
| 2,394 | 0.003759 |
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
from django.contrib import messages
from api.base.settings import * # noqa
# TODO ALL SETTINGS FROM API WILL BE IMPORTED AND WILL NEED TO BE OVERRRIDEN
# TODO THIS IS A STEP TOWARD INTEGRATING ADMIN & API INTO ONE PROJECT
# import local # Build own local.py (used with postgres)
# TODO - remove duplicated items, as this is now using settings from the API
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# Don't allow migrations
DATABASE_ROUTERS = ['admin.base.db.router.NoMigrationRouter']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'admin'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'admin-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
# set to False: prereg uses a SPA and ajax and grabs the token to use in its requests
CSRF_COOKIE_HTTPONLY = False
ALLOWED_HOSTS = [
'.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 5,
}
},
]
USE_L10N = False
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# 3rd party
'django_celery_results',
'raven.contrib.django.raven_compat',
'webpack_loader',
'django_nose',
'password_reset',
'guardian',
'waffle',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.wiki',
'addons.twofactor',
# Internal apps
'admin.common_auth',
'admin.base',
'admin.pre_reg',
'admin.spam',
'admin.metrics',
'admin.nodes',
'admin.users',
'admin.desk',
'admin.meetings',
'admin.institutions',
'admin.preprint_providers',
)
MIGRATION_MODULES = {
'osf': None,
'reviews': None,
'addons_osfstorage': None,
'addons_wiki': None,
'addons_twofactor': None,
}
USE_TZ = True
TIME_ZONE = 'UTC'
# local development using https
if osf_settings.SECURE_MODE and osf_settings.DEBUG_MODE:
INSTALLED_APPS += ('sslserver',)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'osf.OSFUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'admin'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE = (
# TokuMX transaction support
# Needs to go before CommonMiddleware, so that transactions are always started,
# even in the event of a redirect. CommonMiddleware may cause other middlewares'
# process_request to be skipped, e.g. when a trailing slash is omitted
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'waffle.middleware.WaffleMiddleware',
)
MESSAGE_TAGS = {
messages.SUCCESS: 'text-success',
messages.ERROR: 'text-danger',
messages.WARNING: 'text-warning',
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
}
}]
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, '../website/static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'public/js/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
# Keen.io settings in local.py
KEEN_PROJECT_ID = osf_settings.KEEN['private']['project_id']
KEEN_READ_KEY = osf_settings.KEEN['private']['read_key']
KEEN_WRITE_KEY = osf_settings.KEEN['private']['write_key']
KEEN_CREDENTIALS = {
'keen_ready': False
}
if KEEN_CREDENTIALS['keen_ready']:
KEEN_CREDENTIALS.update({
'keen_project_id': KEEN_PROJECT_ID,
'keen_read_key': KEEN_READ_KEY,
'keen_write_key': KEEN_WRITE_KEY
})
ENTRY_POINTS = {'osf4m': 'osf4m', 'prereg_challenge_campaign': 'prereg',
'institution_campaign': 'institution'}
# Set in local.py
DESK_KEY = ''
DESK_KEY_SECRET = ''
TINYMCE_APIKEY = ''
SHARE_URL = osf_settings.SHARE_URL
API_DOMAIN = osf_settings.API_DOMAIN
if DEBUG:
INSTALLED_APPS += ('debug_toolbar', 'nplusone.ext.django',)
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware', 'nplusone.ext.django.NPlusOneMiddleware',)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda(_): True,
'DISABLE_PANELS': {
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.redirects.RedirectsPanel'
}
}
# If set to True, automated tests with extra queries will fail.
NPLUSONE_RAISE = False
|
caseyrollins/osf.io
|
admin/base/settings/defaults.py
|
Python
|
apache-2.0
| 7,533 | 0.00146 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.convert.azw3_output_ui import Ui_Form
from calibre.gui2.convert import Widget
font_family_model = None
class PluginWidget(Widget, Ui_Form):
TITLE = _('AZW3 Output')
HELP = _('Options specific to')+' AZW3 '+_('output')
COMMIT_NAME = 'azw3_output'
ICON = I('mimetypes/azw3.png')
def __init__(self, parent, get_option, get_help, db=None, book_id=None):
Widget.__init__(self, parent,
['prefer_author_sort', 'toc_title',
'mobi_toc_at_start',
'dont_compress', 'no_inline_toc', 'share_not_sync',
]
)
self.db, self.book_id = db, book_id
self.initialize_options(get_option, get_help, db, book_id)
|
drxaero/calibre
|
src/calibre/gui2/convert/azw3_output.py
|
Python
|
gpl-3.0
| 978 | 0.00818 |
from django.shortcuts import render
def home(request):
return render(request, "core/home.html")
|
qubs/climate-data-api
|
core/views.py
|
Python
|
apache-2.0
| 102 | 0 |
#!/usr/bin/python
# coding: utf8
"""
Window for viewing information about code objects (constants, fields, records, tables, lists)
"""
import sys
from PyQt4 import QtGui
class ObjectViewer (QtGui.QTableView):
def __init__ (self, main_window):
QtGui.QTableView.__init__ (self)
main_window.object_viewer = self
self.main_window = main_window
|
rimsleur/QSLime
|
debugger/ObjectViewer.py
|
Python
|
gpl-2.0
| 412 | 0.026239 |
import pygame
def arrow_image(color):
img = pygame.Surface((7, 6))
img.fill((226, 59, 252))
img.set_colorkey((226, 59, 252), pygame.RLEACCEL)
pygame.draw.polygon(img, color, ((0, 0), (3, 3), (6, 0)))
return img
class Menu(object):
def __init__(self, font, options):
self.font = font
self.options = options
self.option = 0
self.height = len(self.options)*(self.font.get_height())+(len(self.options)-1)*3
self.width = 0
for o in self.options:
w = (len(o)+1)*self.font.get_width()
if w > self.width:
self.width = w
def draw(self, surface, pos, background=None, border=None):
ypos = pos[1]
i = 0
if background:
pygame.draw.rect(surface, background, (pos[0]-4, pos[1]-4,
self.width+8, self.height+6))
if border:
pygame.draw.rect(surface, border, (pos[0]-4, pos[1]-4,
self.width+8, self.height+8), 1)
for opt in self.options:
if i == self.option:
icon = ">"
else:
icon = " "
ren = self.font.render(icon + opt)
surface.blit(ren, (pos[0], ypos))
ypos += ren.get_height()+3
i += 1
def move_cursor(self, dir):
if dir > 0:
if self.option < len(self.options)-1:
self.option += 1
elif dir < 0:
if self.option > 0:
self.option -= 1
def get_option(self):
return self.option, self.options[self.option]
class DialogBox(object):
def __init__(self, size, background_color, border_color, font):
self.dialog = []
self.image = pygame.Surface(size)
self.font = font
self.size = size
self.background_color = background_color
self.border_color = border_color
self.update_box()
self.text_pos = 0
self.shown = False
self.scroll_delay = 1
self.frame = 0
self.down_arrow = arrow_image(font.color)
self.curr_dialog=0
def set_scrolldelay(self, delay):
self.scroll_delay = delay
def set_dialog(self, dialog_list):
self.page = 0
self.pages = len(dialog_list)
self.dialog = dialog_list
self.shown = True
self.text_pos = 0
def update_box(self):
self.image.fill(self.background_color)
pygame.draw.rect(self.image, self.border_color,
(0, 0, self.size[0]-1, self.size[1]-1), 1)
def progress(self):
if (self.curr_dialog==0):
return
if (self.text_pos >= len(self.curr_dialog)):
if self.page < self.pages-1:
self.page += 1
self.text_pos = 0
else:
self.shown = False
else:
self.text_pos = len(self.curr_dialog)
def draw(self, surface, pos):
if self.shown and self.page < self.pages:
self.update_box()
self.curr_dialog = self.dialog[self.page]
xpos = 4
ypos = 4
if self.text_pos < len(self.curr_dialog):
self.frame -= 1
if self.frame <= 0:
self.text_pos += 1
self.frame = self.scroll_delay
else:
self.image.blit(self.down_arrow,
(self.image.get_width()-12,
self.image.get_height()-8))
dialog = self.curr_dialog[:self.text_pos]
for word in dialog.split(" "):
ren = self.font.render(word + " ")
w = ren.get_width()
if xpos > self.image.get_width()-w:
ypos += ren.get_height()+3
xpos = 4
self.image.blit(ren, (xpos, ypos))
xpos += w
surface.blit(self.image, pos)
def over(self):
return self.shown != True
def close(self):
self.shown = False
self.page = self.pages
|
jmimu/pyNekketsu
|
retrogamelib/dialog.py
|
Python
|
gpl-3.0
| 4,110 | 0.007056 |
#!/usr/bin/env python
#
# barman - Backup and Recovery Manager for PostgreSQL
#
# Copyright (C) 2011-2015 2ndQuadrant Italia (Devise.IT S.r.l.) <info@2ndquadrant.it>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Backup and Recovery Manager for PostgreSQL
Barman (Backup and Recovery Manager) is an open source administration
tool for disaster recovery of PostgreSQL servers written in Python.
It allows your organisation to perform remote backups of multiple servers
in business critical environments and help DBAs during the recovery
phase. Barman's most requested features include backup catalogues,
incremental backup, retention policies, remote backup and recovery,
archiving and compression of WAL files and backups.
Barman is written and maintained by PostgreSQL professionals 2ndQuadrant.
"""
import sys
# support for running tests through setup.py
# requires recent setuptools library
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
self.test_suite = True
def run_tests(self):
            # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
cmdclass={'test': PyTest}
except ImportError:
from distutils.core import setup
cmdclass={}
if sys.version_info < (2, 6):
raise SystemExit('ERROR: Barman needs at least python 2.6 to work')
install_requires = ['psycopg2', 'argh >= 0.21.2', 'python-dateutil', 'argcomplete']
if sys.version_info < (2, 7):
install_requires.append('argparse')
barman = {}
with open('barman/version.py', 'r') as fversion:
exec (fversion.read(), barman)
setup(
name='barman',
version=barman['__version__'],
author='2ndQuadrant Italia (Devise.IT S.r.l.)',
author_email='info@2ndquadrant.it',
url='http://www.pgbarman.org/',
packages=['barman', ],
scripts=['bin/barman', ],
data_files=[
('share/man/man1', ['doc/barman.1']),
('share/man/man5', ['doc/barman.5']),
],
license='GPL-3.0',
description=__doc__.split("\n")[0],
long_description="\n".join(__doc__.split("\n")[2:]),
install_requires=install_requires,
platforms=['Linux', 'Mac OS X'],
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'Topic :: System :: Archiving :: Backup',
'Topic :: Database',
'Topic :: System :: Recovery Tools',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
tests_require=['pytest', 'mock', 'pytest-capturelog', 'pytest-timeout'],
cmdclass=cmdclass,
use_2to3=True,
)
|
terrorobe/barman
|
setup.py
|
Python
|
gpl-3.0
| 3,778 | 0.001853 |
# coding: utf-8
#
# commands.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module implements the Sublime Text commands provided by SublimeLinter."""
import datetime
from fnmatch import fnmatch
from glob import glob
import json
import os
import re
import shutil
import subprocess
import tempfile
from textwrap import TextWrapper
from threading import Thread
import time
import sublime
import sublime_plugin
from .lint import highlight, linter, persist, util
def error_command(method):
"""
A decorator that executes method only if the current view has errors.
This decorator is meant to be used only with the run method of
sublime_plugin.TextCommand subclasses.
A wrapped version of method is returned.
"""
def run(self, edit, **kwargs):
vid = self.view.id()
if vid in persist.errors and persist.errors[vid]:
method(self, self.view, persist.errors[vid], persist.highlights[vid], **kwargs)
else:
sublime.message_dialog('No lint errors.')
return run
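# Illustrative usage of the decorator above (added comment): a TextCommand
# subclass wraps its run() so the body only executes when the view has lint
# errors, receiving the view, its errors and its highlights, e.g.
#
#     class SublimelinterExampleCommand(sublime_plugin.TextCommand):
#         @error_command
#         def run(self, view, errors, highlights):
#             ...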
def select_line(view, line):
"""Change view's selection to be the given line."""
point = view.text_point(line, 0)
sel = view.sel()
sel.clear()
sel.add(view.line(point))
class SublimelinterLintCommand(sublime_plugin.TextCommand):
"""A command that lints the current view if it has a linter."""
def is_enabled(self):
"""
Return True if the current view can be linted.
If the view has *only* file-only linters, it can be linted
only if the view is not dirty.
Otherwise it can be linted.
"""
has_non_file_only_linter = False
vid = self.view.id()
linters = persist.view_linters.get(vid, [])
for lint in linters:
if lint.tempfile_suffix != '-':
has_non_file_only_linter = True
break
if not has_non_file_only_linter:
return not self.view.is_dirty()
return True
def run(self, edit):
"""Lint the current view."""
from .sublimelinter import SublimeLinter
SublimeLinter.shared_plugin().lint(self.view.id())
class HasErrorsCommand:
"""
A mixin class for sublime_plugin.TextCommand subclasses.
Inheriting from this class will enable the command only if the current view has errors.
"""
def is_enabled(self):
"""Return True if the current view has errors."""
vid = self.view.id()
return vid in persist.errors and len(persist.errors[vid]) > 0
class GotoErrorCommand(sublime_plugin.TextCommand):
"""A superclass for commands that go to the next/previous error."""
def goto_error(self, view, errors, direction='next'):
"""Go to the next/previous error in view."""
sel = view.sel()
if len(sel) == 0:
sel.add(sublime.Region(0, 0))
saved_sel = tuple(sel)
empty_selection = len(sel) == 1 and sel[0].empty()
# sublime.Selection() changes the view's selection, get the point first
point = sel[0].begin() if direction == 'next' else sel[-1].end()
regions = sublime.Selection(view.id())
regions.clear()
for error_type in (highlight.WARNING, highlight.ERROR):
regions.add_all(view.get_regions(highlight.MARK_KEY_FORMAT.format(error_type)))
region_to_select = None
# If going forward, find the first region beginning after the point.
# If going backward, find the first region ending before the point.
# If nothing is found in the given direction, wrap to the first/last region.
if direction == 'next':
for region in regions:
if (
(point == region.begin() and empty_selection and not region.empty()) or
(point < region.begin())
):
region_to_select = region
break
else:
for region in reversed(regions):
if (
(point == region.end() and empty_selection and not region.empty()) or
(point > region.end())
):
region_to_select = region
break
# If there is only one error line and the cursor is in that line, we cannot move.
# Otherwise wrap to the first/last error line unless settings disallow that.
if region_to_select is None and ((len(regions) > 1 or not regions[0].contains(point))):
if persist.settings.get('wrap_find', True):
region_to_select = regions[0] if direction == 'next' else regions[-1]
if region_to_select is not None:
self.select_lint_region(self.view, region_to_select)
else:
sel.clear()
sel.add_all(saved_sel)
sublime.message_dialog('No {0} lint error.'.format(direction))
@classmethod
def select_lint_region(cls, view, region):
"""
Select and scroll to the first marked region that contains region.
If none are found, the beginning of region is used. The view is
centered on the calculated region and the region is selected.
"""
marked_region = cls.find_mark_within(view, region)
if marked_region is None:
marked_region = sublime.Region(region.begin(), region.begin())
sel = view.sel()
sel.clear()
sel.add(marked_region)
# There is a bug in ST3 that prevents the selection from changing
# when a quick panel is open and the viewport does not change position,
# so we call our own custom method that works around that.
util.center_region_in_view(marked_region, view)
@classmethod
def find_mark_within(cls, view, region):
"""Return the nearest marked region that contains region, or None if none found."""
marks = view.get_regions(highlight.MARK_KEY_FORMAT.format(highlight.WARNING))
marks.extend(view.get_regions(highlight.MARK_KEY_FORMAT.format(highlight.ERROR)))
marks.sort(key=sublime.Region.begin)
for mark in marks:
if mark.contains(region):
return mark
return None
class SublimelinterGotoErrorCommand(GotoErrorCommand):
"""A command that selects the next/previous error."""
@error_command
def run(self, view, errors, highlights, **kwargs):
"""Run the command."""
self.goto_error(view, errors, **kwargs)
class SublimelinterShowAllErrors(sublime_plugin.TextCommand):
"""A command that shows a quick panel with all of the errors in the current view."""
@error_command
def run(self, view, errors, highlights):
"""Run the command."""
self.errors = errors
self.highlights = highlights
self.points = []
options = []
for lineno, line_errors in sorted(errors.items()):
if persist.settings.get("passive_warnings", False):
if self.highlights.line_type(lineno) != highlight.ERROR:
continue
line = view.substr(view.full_line(view.text_point(lineno, 0))).rstrip('\n\r')
# Strip whitespace from the front of the line, but keep track of how much was
# stripped so we can adjust the column.
diff = len(line)
line = line.lstrip()
diff -= len(line)
max_prefix_len = 40
for column, message in sorted(line_errors):
# Keep track of the line and column
point = view.text_point(lineno, column)
self.points.append(point)
# If there are more than max_prefix_len characters before the adjusted column,
# lop off the excess and insert an ellipsis.
column = max(column - diff, 0)
if column > max_prefix_len:
visible_line = '...' + line[column - max_prefix_len:]
column = max_prefix_len + 3 # 3 for ...
else:
visible_line = line
# Insert an arrow at the column in the stripped line
code = visible_line[:column] + '➜' + visible_line[column:]
options.append(['{} {}'.format(lineno + 1, message), code])
self.viewport_pos = view.viewport_position()
self.selection = list(view.sel())
view.window().show_quick_panel(
options,
on_select=self.select_error,
on_highlight=self.select_error
)
def select_error(self, index):
"""Completion handler for the quick panel. Selects the indexed error."""
if index != -1:
point = self.points[index]
GotoErrorCommand.select_lint_region(self.view, sublime.Region(point, point))
else:
self.view.set_viewport_position(self.viewport_pos)
self.view.sel().clear()
self.view.sel().add_all(self.selection)
class SublimelinterToggleSettingCommand(sublime_plugin.WindowCommand):
"""Command that toggles a setting."""
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
def is_visible(self, **args):
"""Return True if the opposite of the setting is True."""
if args.get('checked', False):
return True
if persist.settings.has_setting(args['setting']):
setting = persist.settings.get(args['setting'], None)
return setting is not None and setting is not args['value']
else:
return args['value'] is not None
def is_checked(self, **args):
"""Return True if the setting should be checked."""
if args.get('checked', False):
setting = persist.settings.get(args['setting'], False)
return setting is True
else:
return False
def run(self, **args):
"""Toggle the setting if value is boolean, or remove it if None."""
if 'value' in args:
if args['value'] is None:
persist.settings.pop(args['setting'])
else:
persist.settings.set(args['setting'], args['value'], changed=True)
else:
setting = persist.settings.get(args['setting'], False)
persist.settings.set(args['setting'], not setting, changed=True)
persist.settings.save()
class ChooseSettingCommand(sublime_plugin.WindowCommand):
"""An abstract base class for commands that choose a setting from a list."""
def __init__(self, window, setting=None, preview=False):
"""Initialize a new instance."""
super().__init__(window)
self.setting = setting
self._settings = None
self.preview = preview
def description(self, **args):
"""Return the visible description of the command, used in menus."""
return args.get('value', None)
def is_checked(self, **args):
"""Return whether this command should be checked in a menu."""
if 'value' not in args:
return False
item = self.transform_setting(args['value'], matching=True)
setting = self.setting_value(matching=True)
return item == setting
def _get_settings(self):
"""Return the list of settings."""
if self._settings is None:
self._settings = self.get_settings()
return self._settings
settings = property(_get_settings)
def get_settings(self):
"""Return the list of settings. Subclasses must override this."""
raise NotImplementedError
def transform_setting(self, setting, matching=False):
"""
Transform the display text for setting to the form it is stored in.
By default, returns a lowercased copy of setting.
"""
return setting.lower()
def setting_value(self, matching=False):
"""Return the current value of the setting."""
return self.transform_setting(persist.settings.get(self.setting, ''), matching=matching)
def on_highlight(self, index):
"""If preview is on, set the selected setting."""
if self.preview:
self.set(index)
def choose(self, **kwargs):
"""
Choose or set the setting.
If 'value' is in kwargs, the setting is set to the corresponding value.
Otherwise the list of available settings is built via get_settings
and is displayed in a quick panel. The current value of the setting
is initially selected in the quick panel.
"""
if 'value' in kwargs:
setting = self.transform_setting(kwargs['value'])
else:
setting = self.setting_value(matching=True)
index = 0
for i, s in enumerate(self.settings):
if isinstance(s, (tuple, list)):
s = self.transform_setting(s[0])
else:
s = self.transform_setting(s)
if s == setting:
index = i
break
if 'value' in kwargs:
self.set(index)
else:
self.previous_setting = self.setting_value()
self.window.show_quick_panel(
self.settings,
on_select=self.set,
selected_index=index,
on_highlight=self.on_highlight)
def set(self, index):
"""Set the value of the setting."""
if index == -1:
if self.settings_differ(self.previous_setting, self.setting_value()):
self.update_setting(self.previous_setting)
return
setting = self.selected_setting(index)
if isinstance(setting, (tuple, list)):
setting = setting[0]
setting = self.transform_setting(setting)
if not self.settings_differ(persist.settings.get(self.setting, ''), setting):
return
self.update_setting(setting)
def update_setting(self, value):
"""Update the setting with the given value."""
persist.settings.set(self.setting, value, changed=True)
self.setting_was_changed(value)
persist.settings.save()
def settings_differ(self, old_setting, new_setting):
"""Return whether two setting values differ."""
if isinstance(new_setting, (tuple, list)):
new_setting = new_setting[0]
new_setting = self.transform_setting(new_setting)
return new_setting != old_setting
def selected_setting(self, index):
"""
Return the selected setting by index.
Subclasses may override this if they want to return something other
than the indexed value from self.settings.
"""
return self.settings[index]
def setting_was_changed(self, setting):
"""
Do something after the setting value is changed but before settings are saved.
Subclasses may override this if further action is necessary after
the setting's value is changed.
"""
pass
def choose_setting_command(setting, preview):
"""Return a decorator that provides common methods for concrete subclasses of ChooseSettingCommand."""
def decorator(cls):
def init(self, window):
super(cls, self).__init__(window, setting, preview)
def run(self, **kwargs):
"""Run the command."""
self.choose(**kwargs)
cls.setting = setting
cls.__init__ = init
cls.run = run
return cls
return decorator
@choose_setting_command('lint_mode', preview=False)
class SublimelinterChooseLintModeCommand(ChooseSettingCommand):
"""A command that selects a lint mode from a list."""
def get_settings(self):
"""Return a list of the lint modes."""
return [[name.capitalize(), description] for name, description in persist.LINT_MODES]
def setting_was_changed(self, setting):
"""Update all views when the lint mode changes."""
if setting == 'background':
from .sublimelinter import SublimeLinter
SublimeLinter.lint_all_views()
else:
linter.Linter.clear_all()
@choose_setting_command('mark_style', preview=True)
class SublimelinterChooseMarkStyleCommand(ChooseSettingCommand):
"""A command that selects a mark style from a list."""
def get_settings(self):
"""Return a list of the mark styles."""
return highlight.mark_style_names()
@choose_setting_command('gutter_theme', preview=True)
class SublimelinterChooseGutterThemeCommand(ChooseSettingCommand):
"""A command that selects a gutter theme from a list."""
def get_settings(self):
"""
Return a list of all available gutter themes, with 'None' at the end.
Whether the theme is colorized and is a SublimeLinter or user theme
is indicated below the theme name.
"""
settings = self.find_gutter_themes()
settings.append(['None', 'Do not display gutter marks'])
self.themes.append('none')
return settings
def find_gutter_themes(self):
"""
Find all SublimeLinter.gutter-theme resources.
For each found resource, if it doesn't match one of the patterns
from the "gutter_theme_excludes" setting, return the base name
of resource and info on whether the theme is a standard theme
or a user theme, as well as whether it is colorized.
The list of paths to the resources is appended to self.themes.
"""
self.themes = []
settings = []
gutter_themes = sublime.find_resources('*.gutter-theme')
excludes = persist.settings.get('gutter_theme_excludes', [])
pngs = sublime.find_resources('*.png')
for theme in gutter_themes:
# Make sure the theme has error.png and warning.png
exclude = False
parent = os.path.dirname(theme)
for name in ('error', 'warning'):
if '{}/{}.png'.format(parent, name) not in pngs:
exclude = True
if exclude:
continue
# Now see if the theme name is in gutter_theme_excludes
name = os.path.splitext(os.path.basename(theme))[0]
for pattern in excludes:
if fnmatch(name, pattern):
exclude = True
break
if exclude:
continue
self.themes.append(theme)
try:
info = json.loads(sublime.load_resource(theme))
colorize = info.get('colorize', False)
except ValueError:
colorize = False
std_theme = theme.startswith('Packages/SublimeLinter/gutter-themes/')
settings.append([
name,
'{}{}'.format(
'SublimeLinter theme' if std_theme else 'User theme',
' (colorized)' if colorize else ''
)
])
# Sort self.themes and settings in parallel using the zip trick
settings, self.themes = zip(*sorted(zip(settings, self.themes)))
# zip returns tuples, convert back to lists
settings = list(settings)
self.themes = list(self.themes)
return settings
def selected_setting(self, index):
"""Return the theme name with the given index."""
return self.themes[index]
def transform_setting(self, setting, matching=False):
"""
Return a transformed version of setting.
For gutter themes, setting is a Packages-relative path
to a .gutter-theme file.
        If matching == False, return the original setting text;
        gutter theme settings are not lowercased.
If matching == True, return the base name of the filename
without the .gutter-theme extension.
"""
if matching:
return os.path.splitext(os.path.basename(setting))[0]
else:
return setting
class SublimelinterToggleLinterCommand(sublime_plugin.WindowCommand):
"""A command that toggles, enables, or disables linter plugins."""
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
self.linters = {}
def is_visible(self, **args):
"""Return True if the command would show any linters."""
which = args['which']
if self.linters.get(which) is None:
linters = []
settings = persist.settings.get('linters', {})
for instance in persist.linter_classes:
linter_settings = settings.get(instance, {})
disabled = linter_settings.get('@disable')
if which == 'all':
include = True
instance = [instance, 'disabled' if disabled else 'enabled']
else:
include = (
which == 'enabled' and not disabled or
which == 'disabled' and disabled
)
if include:
linters.append(instance)
linters.sort()
self.linters[which] = linters
return len(self.linters[which]) > 0
def run(self, **args):
"""Run the command."""
self.which = args['which']
if self.linters[self.which]:
self.window.show_quick_panel(self.linters[self.which], self.on_done)
def on_done(self, index):
"""Completion handler for quick panel, toggle the enabled state of the chosen linter."""
if index != -1:
linter = self.linters[self.which][index]
if isinstance(linter, list):
linter = linter[0]
settings = persist.settings.get('linters', {})
linter_settings = settings.get(linter, {})
linter_settings['@disable'] = not linter_settings.get('@disable', False)
persist.settings.set('linters', settings, changed=True)
persist.settings.save()
self.linters = {}
class SublimelinterCreateLinterPluginCommand(sublime_plugin.WindowCommand):
"""A command that creates a new linter plugin."""
def run(self):
"""Run the command."""
if not sublime.ok_cancel_dialog(
'You will be asked for the linter name. Please enter the name '
'of the linter binary (including dashes), NOT the name of the language being linted. '
'For example, to lint CSS with csslint, the linter name is '
'“csslint”, NOT “css”.',
'I understand'
):
return
self.window.show_input_panel(
'Linter name:',
'',
on_done=self.copy_linter,
on_change=None,
on_cancel=None)
def copy_linter(self, name):
"""Copy the template linter to a new linter with the given name."""
self.name = name
self.fullname = 'SublimeLinter-contrib-{}'.format(name)
self.dest = os.path.join(sublime.packages_path(), self.fullname)
if os.path.exists(self.dest):
sublime.error_message('The plugin “{}” already exists.'.format(self.fullname))
return
src = os.path.join(sublime.packages_path(), persist.PLUGIN_DIRECTORY, 'linter-plugin-template')
self.temp_dir = None
try:
self.temp_dir = tempfile.mkdtemp()
self.temp_dest = os.path.join(self.temp_dir, self.fullname)
shutil.copytree(src, self.temp_dest)
self.get_linter_language(name, self.configure_linter)
except Exception as ex:
if self.temp_dir and os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
sublime.error_message('An error occurred while copying the template plugin: {}'.format(str(ex)))
def configure_linter(self, language):
"""Fill out the template and move the linter into Packages."""
try:
if language is None:
return
if not self.fill_template(self.temp_dir, self.name, self.fullname, language):
return
git = util.which('git')
if git:
subprocess.call((git, 'init', self.temp_dest))
shutil.move(self.temp_dest, self.dest)
util.open_directory(self.dest)
self.wait_for_open(self.dest)
except Exception as ex:
sublime.error_message('An error occurred while configuring the plugin: {}'.format(str(ex)))
finally:
if self.temp_dir and os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def get_linter_language(self, name, callback):
"""Get the language (python, node, etc.) on which the linter is based."""
languages = ['javascript', 'python', 'ruby', 'other']
items = ['Select the language on which the linter is based:']
for language in languages:
items.append(' ' + language.capitalize())
def on_done(index):
language = languages[index - 1] if index > 0 else None
callback(language)
self.window.show_quick_panel(items, on_done)
def fill_template(self, template_dir, name, fullname, language):
"""Replace placeholders and fill template files in template_dir, return success."""
# Read per-language info
path = os.path.join(os.path.dirname(__file__), 'create_linter_info.json')
with open(path, mode='r', encoding='utf-8') as f:
try:
info = json.load(f)
except Exception as err:
persist.printf(err)
sublime.error_message('A configuration file could not be opened, the linter cannot be created.')
return False
info = info.get(language, {})
extra_attributes = []
comment_re = info.get('comment_re', 'None')
extra_attributes.append('comment_re = ' + comment_re)
attributes = info.get('attributes', [])
for attr in attributes:
extra_attributes.append(attr.format(name))
extra_attributes = '\n '.join(extra_attributes)
if extra_attributes:
extra_attributes += '\n'
extra_steps = info.get('extra_steps', '')
if isinstance(extra_steps, list):
extra_steps = '\n\n'.join(extra_steps)
if extra_steps:
extra_steps = '\n' + extra_steps + '\n'
platform = info.get('platform', language.capitalize())
# Replace placeholders
placeholders = {
'__linter__': name,
'__user__': util.get_user_fullname(),
'__year__': str(datetime.date.today().year),
'__class__': self.camel_case(name),
'__superclass__': info.get('superclass', 'Linter'),
'__cmd__': '{}@python'.format(name) if language == 'python' else name,
'# __extra_attributes__': extra_attributes,
'__platform__': platform,
'__install__': info['installer'].format(name),
'__extra_install_steps__': extra_steps
}
for root, dirs, files in os.walk(template_dir):
for filename in files:
extension = os.path.splitext(filename)[1]
if extension in ('.py', '.md', '.txt'):
path = os.path.join(root, filename)
with open(path, encoding='utf-8') as f:
text = f.read()
for placeholder, value in placeholders.items():
text = text.replace(placeholder, value)
with open(path, mode='w', encoding='utf-8') as f:
f.write(text)
return True
def camel_case(self, name):
"""Convert and return a name in the form foo-bar to FooBar."""
camel_name = name[0].capitalize()
i = 1
while i < len(name):
if name[i] == '-' and i < len(name) - 1:
camel_name += name[i + 1].capitalize()
i += 1
else:
camel_name += name[i]
i += 1
return camel_name
def wait_for_open(self, dest):
"""Wait for new linter window to open in another thread."""
def open_linter_py():
"""Wait until the new linter window has opened and open linter.py."""
start = datetime.datetime.now()
while True:
time.sleep(0.25)
delta = datetime.datetime.now() - start
# Wait a maximum of 5 seconds
if delta.seconds > 5:
break
window = sublime.active_window()
folders = window.folders()
if folders and folders[0] == dest:
window.open_file(os.path.join(dest, 'linter.py'))
break
sublime.set_timeout_async(open_linter_py, 0)
class SublimelinterPackageControlCommand(sublime_plugin.WindowCommand):
"""
Abstract superclass for Package Control utility commands.
Only works if git is installed.
"""
TAG_RE = re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<release>\d+)(?:\+\d+)?')
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
self.git = ''
def is_visible(self, paths=[]):
"""Return True if any eligible plugin directories are selected."""
if self.git == '':
self.git = util.which('git')
if self.git:
for path in paths:
if self.is_eligible_path(path):
return True
return False
def is_eligible_path(self, path):
"""
Return True if path is an eligible directory.
A directory is eligible if it has a messages subdirectory
and has messages.json.
"""
return (
os.path.isdir(path) and
os.path.isdir(os.path.join(path, 'messages')) and
os.path.isfile(os.path.join(path, 'messages.json'))
)
def get_current_tag(self):
"""
Return the most recent tag components.
A tuple of (major, minor, release) is returned, or (1, 0, 0) if there are no tags.
If the most recent tag does not conform to semver, return (None, None, None).
"""
tag = util.communicate(['git', 'describe', '--tags', '--abbrev=0']).strip()
if not tag:
return (1, 0, 0)
match = self.TAG_RE.match(tag)
if match:
return (int(match.group('major')), int(match.group('minor')), int(match.group('release')))
else:
return None
class SublimelinterNewPackageControlMessageCommand(SublimelinterPackageControlCommand):
"""
This command automates the process of creating new Package Control release messages.
It creates a new entry in messages.json for the next version
and creates a new file named messages/<version>.txt.
"""
COMMIT_MSG_RE = re.compile(r'{{{{(.+?)}}}}')
def __init__(self, window):
"""Initialize a new instance."""
super().__init__(window)
def run(self, paths=[]):
"""Run the command."""
for path in paths:
if self.is_eligible_path(path):
self.make_new_version_message(path)
def make_new_version_message(self, path):
"""Make a new version message for the repo at the given path."""
try:
cwd = os.getcwd()
os.chdir(path)
version = self.get_current_tag()
if version[0] is None:
return
messages_path = os.path.join(path, 'messages.json')
message_path = self.rewrite_messages_json(messages_path, version)
if os.path.exists(message_path):
os.remove(message_path)
with open(message_path, mode='w', encoding='utf-8') as f:
header = '{} {}'.format(
os.path.basename(path),
os.path.splitext(os.path.basename(message_path))[0])
f.write('{}\n{}\n'.format(header, '-' * (len(header) + 1)))
f.write(self.get_commit_messages_since(version))
self.window.run_command('open_file', args={'file': message_path})
except Exception:
import traceback
traceback.print_exc()
finally:
os.chdir(cwd)
def rewrite_messages_json(self, messages_path, tag):
"""Add an entry in messages.json for tag, return relative path to the file."""
with open(messages_path, encoding='utf-8') as f:
messages = json.load(f)
major, minor, release = tag
release += 1
tag = '{}.{}.{}'.format(major, minor, release)
message_path = os.path.join('messages', '{}.txt'.format(tag))
messages[tag] = message_path
message_path = os.path.join(os.path.dirname(messages_path), message_path)
with open(messages_path, mode='w', encoding='utf-8') as f:
messages_json = '{\n'
sorted_messages = []
if 'install' in messages:
install_message = messages.pop('install')
sorted_messages.append(' "install": "{}"'.format(install_message))
keys = sorted(map(self.sortable_tag, messages.keys()))
for _, key in keys:
sorted_messages.append(' "{}": "{}"'.format(key, messages[key]))
messages_json += ',\n'.join(sorted_messages)
messages_json += '\n}\n'
f.write(messages_json)
return message_path
def sortable_tag(self, tag):
"""Return a version tag in a sortable form."""
if tag == 'install':
return (tag, tag)
major, minor, release = tag.split('.')
if '+' in release:
release, update = release.split('+')
update = '+{:04}'.format(int(update))
else:
update = ''
return ('{:04}.{:04}.{:04}{}'.format(int(major), int(minor), int(release), update), tag)
def get_commit_messages_since(self, version):
"""Return a formatted list of commit messages since the given tagged version."""
tag = '{}.{}.{}'.format(*version)
output = util.communicate([
'git', 'log',
'--pretty=format:{{{{%w(0,0,0)%s %b}}}}',
'--reverse', tag + '..'
])
# Split the messages, they are bounded by {{{{ }}}}
messages = []
for match in self.COMMIT_MSG_RE.finditer(output):
messages.append(match.group(1).strip())
# Wrap the messages
wrapper = TextWrapper(initial_indent='- ', subsequent_indent=' ')
messages = list(map(lambda msg: '\n'.join(wrapper.wrap(msg)), messages))
return '\n\n'.join(messages) + '\n'
class SublimelinterClearColorSchemeFolderCommand(sublime_plugin.WindowCommand):
"""A command that clears all of SublimeLinter made color schemes."""
def run(self):
"""Run the command."""
base_path = os.path.join(sublime.packages_path(), 'User', '*.tmTheme')
sublime_path = os.path.join(sublime.packages_path(), 'User', 'SublimeLinter', '*.tmTheme')
themes = glob(base_path) + glob(sublime_path)
prefs = sublime.load_settings('Preferences.sublime-settings')
scheme = prefs.get('color_scheme')
for theme in themes:
            # Ensure it is a (SL) theme and it is not the current scheme
if re.search(r'\(SL\)', theme) and os.path.normpath(scheme) not in theme:
persist.debug('deleting {}'.format(os.path.split(theme)[1]))
os.remove(theme)
class SublimelinterClearCachesCommand(sublime_plugin.WindowCommand):
"""A command that clears all of SublimeLinter's internal caches."""
def run(self):
"""Run the command."""
util.clear_path_caches()
util.get_rc_settings.cache_clear()
util.find_file.cache_clear()
linter.Linter.clear_settings_caches()
class SublimelinterReportCommand(sublime_plugin.WindowCommand):
"""
A command that displays a report of all errors.
The scope of the report is all open files in the current window,
all files in all folders in the current window, or both.
"""
def run(self, on='files'):
"""Run the command. on determines the scope of the report."""
output = self.window.new_file()
output.set_name('{} Error Report'.format(persist.PLUGIN_NAME))
output.set_scratch(True)
from .sublimelinter import SublimeLinter
self.plugin = SublimeLinter.shared_plugin()
if on == 'files' or on == 'both':
for view in self.window.views():
self.report(output, view)
if on == 'folders' or on == 'both':
for folder in self.window.folders():
self.folder(output, folder)
def folder(self, output, folder):
"""Report on all files in a folder."""
for root, dirs, files in os.walk(folder):
for name in files:
path = os.path.join(root, name)
# Ignore files over 256K to speed things up a bit
if os.stat(path).st_size < 256 * 1024:
# TODO: not implemented
pass
def report(self, output, view):
"""Write a report on the given view to output."""
def finish_lint(view, linters, hit_time):
if not linters:
return
def insert(edit):
if not any(l.errors for l in linters):
return
filename = os.path.basename(linters[0].filename or 'untitled')
out = '\n{}:\n'.format(filename)
for lint in sorted(linters, key=lambda lint: lint.name):
if lint.errors:
out += '\n {}:\n'.format(lint.name)
items = sorted(lint.errors.items())
# Get the highest line number so we know how much padding numbers need
highest_line = items[-1][0]
width = 1
while highest_line >= 10:
highest_line /= 10
width += 1
for line, messages in items:
for col, message in messages:
out += ' {:>{width}}: {}\n'.format(line + 1, message, width=width)
output.insert(edit, output.size(), out)
persist.edits[output.id()].append(insert)
output.run_command('sublimelinter_edit')
kwargs = {'self': self.plugin, 'view_id': view.id(), 'callback': finish_lint}
from .sublimelinter import SublimeLinter
Thread(target=SublimeLinter.lint, kwargs=kwargs).start()
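# Illustrative usage sketch (assumption, not part of the original file): Sublime Text
# derives command names from the class names above (e.g. SublimelinterGotoErrorCommand
# -> "sublimelinter_goto_error"), so the commands could be driven from the console or
# another plugin roughly like this. `_example_invoke_commands` is a hypothetical helper.
def _example_invoke_commands(window):
    """Trigger a few of the commands defined above on the active view/window."""
    view = window.active_view()
    # Jump to the next lint error in the active view.
    view.run_command('sublimelinter_goto_error', {'direction': 'next'})
    # Show the quick panel listing every error in the active view.
    view.run_command('sublimelinter_show_all_errors')
    # Toggle a boolean setting, e.g. debug output.
    window.run_command('sublimelinter_toggle_setting', {'setting': 'debug'})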
|
michael-ball/sublime-text
|
sublime-text-3/Packages/SublimeLinter/commands.py
|
Python
|
unlicense
| 39,327 | 0.001653 |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import errno
import json
import os
import re
import ssl
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
from pyversion import is_python3
if is_python3():
import urllib.request
import urllib.error
else:
import urllib2
import imp
urllib = imp.new_module('urllib')
urllib.request = urllib2
urllib.error = urllib2
from signal import SIGTERM
from error import GitError, UploadError
import platform_utils
from repo_trace import Trace
if is_python3():
from http.client import HTTPException
else:
from httplib import HTTPException
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
from git_refs import R_CHANGES, R_HEADS, R_TAGS
ID_RE = re.compile(r'^[0-9a-f]{40}$')
REVIEW_CACHE = dict()
def IsChange(rev):
return rev.startswith(R_CHANGES)
def IsId(rev):
return ID_RE.match(rev)
def IsTag(rev):
return rev.startswith(R_TAGS)
def IsImmutable(rev):
return IsChange(rev) or IsId(rev) or IsTag(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
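# Illustration: _key normalizes git config keys the way git does, e.g.
#   _key('Core.Editor')           -> 'core.editor'
#   _key('Branch.Master.Remote')  -> 'branch.Master.remote'   (subsection case preserved)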
class GitConfig(object):
_ForUser = None
@classmethod
def ForUser(cls):
if cls._ForUser is None:
cls._ForUser = cls(configfile = os.path.expanduser('~/.gitconfig'))
return cls._ForUser
@classmethod
def ForRepository(cls, gitdir, defaults=None):
return cls(configfile = os.path.join(gitdir, 'config'),
defaults = defaults)
def __init__(self, configfile, defaults=None, jsonFile=None):
self.file = configfile
self.defaults = defaults
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
self._json = jsonFile
if self._json is None:
self._json = os.path.join(
os.path.dirname(self.file),
'.repo_' + os.path.basename(self.file) + '.json')
def Has(self, name, include_defaults = True):
"""Return true if this configuration file has the key.
"""
if _key(name) in self._cache:
return True
if include_defaults and self.defaults:
return self.defaults.Has(name, include_defaults = True)
return False
def GetBoolean(self, name):
"""Returns a boolean from the configuration file.
None : The value was not defined, or is not a boolean.
True : The value was set to true or yes.
False: The value was set to false or no.
"""
v = self.GetString(name)
if v is None:
return None
v = v.lower()
if v in ('true', 'yes'):
return True
if v in ('false', 'no'):
return False
return None
def GetString(self, name, all_keys=False):
"""Get the first value for a key, or None if it is not defined.
This configuration file is used first, if the key is not
defined or all_keys = True then the defaults are also searched.
"""
try:
v = self._cache[_key(name)]
except KeyError:
if self.defaults:
return self.defaults.GetString(name, all_keys = all_keys)
v = []
if not all_keys:
if v:
return v[0]
return None
r = []
r.extend(v)
if self.defaults:
r.extend(self.defaults.GetString(name, all_keys = True))
return r
def SetString(self, name, value):
"""Set the value(s) for a key.
Only this configuration file is modified.
The supplied value should be either a string,
or a list of strings (to store multiple values).
"""
key = _key(name)
try:
old = self._cache[key]
except KeyError:
old = []
if value is None:
if old:
del self._cache[key]
self._do('--unset-all', name)
elif isinstance(value, list):
if len(value) == 0:
self.SetString(name, None)
elif len(value) == 1:
self.SetString(name, value[0])
elif old != value:
self._cache[key] = list(value)
self._do('--replace-all', name, value[0])
for i in range(1, len(value)):
self._do('--add', name, value[i])
elif len(old) != 1 or old[0] != value:
self._cache[key] = [value]
self._do('--replace-all', name, value)
def GetRemote(self, name):
"""Get the remote.$name.* configuration values as an object.
"""
try:
r = self._remotes[name]
except KeyError:
r = Remote(self, name)
self._remotes[r.name] = r
return r
def GetBranch(self, name):
"""Get the branch.$name.* configuration values as an object.
"""
try:
b = self._branches[name]
except KeyError:
b = Branch(self, name)
self._branches[b.name] = b
return b
def GetSubSections(self, section):
"""List all subsection names matching $section.*.*
"""
return self._sections.get(section, set())
def HasSection(self, section, subsection = ''):
"""Does at least one key in section.subsection exist?
"""
try:
return subsection in self._sections[section]
except KeyError:
return False
def UrlInsteadOf(self, url):
"""Resolve any url.*.insteadof references.
"""
for new_url in self.GetSubSections('url'):
for old_url in self.GetString('url.%s.insteadof' % new_url, True):
if old_url is not None and url.startswith(old_url):
return new_url + url[len(old_url):]
return url
@property
def _sections(self):
d = self._section_dict
if d is None:
d = {}
for name in self._cache.keys():
p = name.split('.')
if 2 == len(p):
section = p[0]
subsect = ''
else:
section = p[0]
subsect = '.'.join(p[1:-1])
if section not in d:
d[section] = set()
d[section].add(subsect)
self._section_dict = d
return d
@property
def _cache(self):
if self._cache_dict is None:
self._cache_dict = self._Read()
return self._cache_dict
def _Read(self):
d = self._ReadJson()
if d is None:
d = self._ReadGit()
self._SaveJson(d)
return d
def _ReadJson(self):
try:
if os.path.getmtime(self._json) \
<= os.path.getmtime(self.file):
platform_utils.remove(self._json)
return None
except OSError:
return None
try:
Trace(': parsing %s', self.file)
with open(self._json) as fd:
return json.load(fd)
except (IOError, ValueError):
platform_utils.remove(self._json)
return None
def _SaveJson(self, cache):
try:
with open(self._json, 'w') as fd:
json.dump(cache, fd, indent=2)
except (IOError, TypeError):
if os.path.exists(self._json):
platform_utils.remove(self._json)
def _ReadGit(self):
"""
Read configuration data from git.
This internal method populates the GitConfig cache.
"""
c = {}
d = self._do('--null', '--list')
if d is None:
return c
if not is_python3():
d = d.decode('utf-8')
for line in d.rstrip('\0').split('\0'):
if '\n' in line:
key, val = line.split('\n', 1)
else:
key = line
val = None
if key in c:
c[key].append(val)
else:
c[key] = [val]
return c
def _do(self, *args):
command = ['config', '--file', self.file]
command.extend(args)
p = GitCommand(None,
command,
capture_stdout = True,
capture_stderr = True)
if p.Wait() == 0:
return p.stdout
else:
      raise GitError('git config %s: %s' % (str(args), p.stderr))
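# Illustrative usage sketch (assumption, not part of the original file): how the
# single- and multi-valued accessors above fit together. `_example_config_roundtrip`
# is a hypothetical helper and needs git plus a writable config file to actually run.
def _example_config_roundtrip(gitdir):
  cfg = GitConfig.ForRepository(gitdir)
  cfg.SetString('repo.example', 'one')                    # single value
  cfg.SetString('repo.examples', ['one', 'two'])          # multiple values for one key
  first = cfg.GetString('repo.examples')                  # -> 'one'
  every = cfg.GetString('repo.examples', all_keys=True)   # -> ['one', 'two']
  cfg.SetString('repo.example', None)                     # unset the key again
  return first, every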
class RefSpec(object):
"""A Git refspec line, split into its components:
forced: True if the line starts with '+'
src: Left side of the line
dst: Right side of the line
"""
@classmethod
def FromString(cls, rs):
lhs, rhs = rs.split(':', 2)
if lhs.startswith('+'):
lhs = lhs[1:]
forced = True
else:
forced = False
return cls(forced, lhs, rhs)
def __init__(self, forced, lhs, rhs):
self.forced = forced
self.src = lhs
self.dst = rhs
def SourceMatches(self, rev):
if self.src:
if rev == self.src:
return True
if self.src.endswith('/*') and rev.startswith(self.src[:-1]):
return True
return False
def DestMatches(self, ref):
if self.dst:
if ref == self.dst:
return True
if self.dst.endswith('/*') and ref.startswith(self.dst[:-1]):
return True
return False
def MapSource(self, rev):
if self.src.endswith('/*'):
return self.dst[:-1] + rev[len(self.src) - 1:]
return self.dst
def __str__(self):
s = ''
if self.forced:
s += '+'
if self.src:
s += self.src
if self.dst:
s += ':'
s += self.dst
return s
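# Illustrative usage sketch (not part of the original file): parsing and mapping a
# typical fetch refspec with the helpers above.
def _example_refspec():
  spec = RefSpec.FromString('+refs/heads/*:refs/remotes/origin/*')
  assert spec.forced
  assert spec.SourceMatches('refs/heads/master')
  # Map a source ref onto its tracking ref.
  return spec.MapSource('refs/heads/master')   # -> 'refs/remotes/origin/master'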
_master_processes = []
_master_keys = set()
_ssh_master = True
_master_keys_lock = None
def init_ssh():
"""Should be called once at the start of repo to init ssh master handling.
At the moment, all we do is to create our lock.
"""
global _master_keys_lock
assert _master_keys_lock is None, "Should only call init_ssh once"
_master_keys_lock = _threading.Lock()
def _open_ssh(host, port=None):
global _ssh_master
# Acquire the lock. This is needed to prevent opening multiple masters for
# the same host when we're running "repo sync -jN" (for N > 1) _and_ the
# manifest <remote fetch="ssh://xyz"> specifies a different host from the
# one that was passed to repo init.
_master_keys_lock.acquire()
try:
# Check to see whether we already think that the master is running; if we
# think it's already running, return right away.
if port is not None:
key = '%s:%s' % (host, port)
else:
key = host
if key in _master_keys:
return True
if not _ssh_master \
or 'GIT_SSH' in os.environ \
or sys.platform in ('win32', 'cygwin'):
# failed earlier, or cygwin ssh can't do this
#
return False
# We will make two calls to ssh; this is the common part of both calls.
command_base = ['ssh',
'-o','ControlPath %s' % ssh_sock(),
host]
if port is not None:
command_base[1:1] = ['-p', str(port)]
# Since the key wasn't in _master_keys, we think that master isn't running.
# ...but before actually starting a master, we'll double-check. This can
    # be important because we can't tell that 'git@myhost.com' is the same
# as 'myhost.com' where "User git" is setup in the user's ~/.ssh/config file.
check_command = command_base + ['-O','check']
try:
Trace(': %s', ' '.join(check_command))
check_process = subprocess.Popen(check_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
check_process.communicate() # read output, but ignore it...
isnt_running = check_process.wait()
if not isnt_running:
        # Our double-check found that the master _was_ in fact running. Add to
# the list of keys.
_master_keys.add(key)
return True
except Exception:
      # Ignore exceptions. We will fall back to the normal command and print
# to the log there.
pass
command = command_base[:1] + \
['-M', '-N'] + \
command_base[1:]
try:
Trace(': %s', ' '.join(command))
p = subprocess.Popen(command)
except Exception as e:
_ssh_master = False
print('\nwarn: cannot enable ssh control master for %s:%s\n%s'
% (host,port, str(e)), file=sys.stderr)
return False
time.sleep(1)
ssh_died = (p.poll() is not None)
if ssh_died:
return False
_master_processes.append(p)
_master_keys.add(key)
return True
finally:
_master_keys_lock.release()
def close_ssh():
global _master_keys_lock
terminate_ssh_clients()
for p in _master_processes:
try:
os.kill(p.pid, SIGTERM)
p.wait()
except OSError:
pass
del _master_processes[:]
_master_keys.clear()
d = ssh_sock(create=False)
if d:
try:
platform_utils.rmdir(os.path.dirname(d))
except OSError:
pass
# We're done with the lock, so we can delete it.
_master_keys_lock = None
URI_SCP = re.compile(r'^([^@:]*@?[^:/]{1,}):')
URI_ALL = re.compile(r'^([a-z][a-z+-]*)://([^@/]*@?[^/]*)/')
def GetSchemeFromUrl(url):
m = URI_ALL.match(url)
if m:
return m.group(1)
return None
@contextlib.contextmanager
def GetUrlCookieFile(url, quiet):
if url.startswith('persistent-'):
try:
p = subprocess.Popen(
['git-remote-persistent-https', '-print_config', url],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
cookieprefix = 'http.cookiefile='
proxyprefix = 'http.proxy='
cookiefile = None
proxy = None
for line in p.stdout:
line = line.strip()
if line.startswith(cookieprefix):
cookiefile = os.path.expanduser(line[len(cookieprefix):])
if line.startswith(proxyprefix):
proxy = line[len(proxyprefix):]
# Leave subprocess open, as cookie file may be transient.
if cookiefile or proxy:
yield cookiefile, proxy
return
finally:
p.stdin.close()
if p.wait():
err_msg = p.stderr.read()
if ' -print_config' in err_msg:
pass # Persistent proxy doesn't support -print_config.
elif not quiet:
print(err_msg, file=sys.stderr)
except OSError as e:
if e.errno == errno.ENOENT:
pass # No persistent proxy.
raise
cookiefile = GitConfig.ForUser().GetString('http.cookiefile')
if cookiefile:
cookiefile = os.path.expanduser(cookiefile)
yield cookiefile, None
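# Illustrative usage sketch (not part of the original file): GetUrlCookieFile is a
# context manager, so callers typically unpack the cookie file and proxy like this:
#
#   with GetUrlCookieFile('https://example.googlesource.com/repo', quiet=True) as (cookiefile, proxy):
#       pass  # hand cookiefile/proxy on to git or urllib as needed
#
# The URL above is only a placeholder.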
def _preconnect(url):
m = URI_ALL.match(url)
if m:
scheme = m.group(1)
host = m.group(2)
if ':' in host:
host, port = host.split(':')
else:
port = None
if scheme in ('ssh', 'git+ssh', 'ssh+git'):
return _open_ssh(host, port)
return False
m = URI_SCP.match(url)
if m:
host = m.group(1)
return _open_ssh(host)
return False
class Remote(object):
"""Configuration options related to a remote.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.url = self._Get('url')
self.pushUrl = self._Get('pushurl')
self.review = self._Get('review')
self.projectname = self._Get('projectname')
self.fetch = list(map(RefSpec.FromString,
self._Get('fetch', all_keys=True)))
self._review_url = None
def _InsteadOf(self):
globCfg = GitConfig.ForUser()
urlList = globCfg.GetSubSections('url')
longest = ""
longestUrl = ""
for url in urlList:
key = "url." + url + ".insteadOf"
insteadOfList = globCfg.GetString(key, all_keys=True)
for insteadOf in insteadOfList:
if self.url.startswith(insteadOf) \
and len(insteadOf) > len(longest):
longest = insteadOf
longestUrl = url
if len(longest) == 0:
return self.url
return self.url.replace(longest, longestUrl, 1)
def PreConnectFetch(self):
connectionUrl = self._InsteadOf()
return _preconnect(connectionUrl)
def ReviewUrl(self, userEmail, validate_certs):
if self._review_url is None:
if self.review is None:
return None
u = self.review
if u.startswith('persistent-'):
u = u[len('persistent-'):]
if u.split(':')[0] not in ('http', 'https', 'sso', 'ssh'):
u = 'http://%s' % u
if u.endswith('/Gerrit'):
u = u[:len(u) - len('/Gerrit')]
if u.endswith('/ssh_info'):
u = u[:len(u) - len('/ssh_info')]
if not u.endswith('/'):
u += '/'
http_url = u
if u in REVIEW_CACHE:
self._review_url = REVIEW_CACHE[u]
elif 'REPO_HOST_PORT_INFO' in os.environ:
host, port = os.environ['REPO_HOST_PORT_INFO'].split()
self._review_url = self._SshReviewUrl(userEmail, host, port)
REVIEW_CACHE[u] = self._review_url
elif u.startswith('sso:') or u.startswith('ssh:'):
self._review_url = u # Assume it's right
REVIEW_CACHE[u] = self._review_url
elif 'REPO_IGNORE_SSH_INFO' in os.environ:
self._review_url = http_url
REVIEW_CACHE[u] = self._review_url
else:
try:
info_url = u + 'ssh_info'
if not validate_certs:
context = ssl._create_unverified_context()
info = urllib.request.urlopen(info_url, context=context).read()
else:
info = urllib.request.urlopen(info_url).read()
if info == b'NOT_AVAILABLE' or b'<' in info:
# If `info` contains '<', we assume the server gave us some sort
# of HTML response back, like maybe a login page.
#
# Assume HTTP if SSH is not enabled or ssh_info doesn't look right.
self._review_url = http_url
else:
info = info.decode('utf-8')
host, port = info.split()
self._review_url = self._SshReviewUrl(userEmail, host, port)
except urllib.error.HTTPError as e:
raise UploadError('%s: %s' % (self.review, str(e)))
except urllib.error.URLError as e:
raise UploadError('%s: %s' % (self.review, str(e)))
except HTTPException as e:
raise UploadError('%s: %s' % (self.review, e.__class__.__name__))
REVIEW_CACHE[u] = self._review_url
return self._review_url + self.projectname
def _SshReviewUrl(self, userEmail, host, port):
username = self._config.GetString('review.%s.username' % self.review)
if username is None:
username = userEmail.split('@')[0]
return 'ssh://%s@%s:%s/' % (username, host, port)
def ToLocal(self, rev):
"""Convert a remote revision string to something we have locally.
"""
if self.name == '.' or IsId(rev):
return rev
if not rev.startswith('refs/'):
rev = R_HEADS + rev
for spec in self.fetch:
if spec.SourceMatches(rev):
return spec.MapSource(rev)
if not rev.startswith(R_HEADS):
return rev
raise GitError('%s: remote %s does not have %s' %
(self.projectname, self.name, rev))
def WritesTo(self, ref):
"""True if the remote stores to the tracking ref.
"""
for spec in self.fetch:
if spec.DestMatches(ref):
return True
return False
def ResetFetch(self, mirror=False):
"""Set the fetch refspec to its default value.
"""
if mirror:
dst = 'refs/heads/*'
else:
dst = 'refs/remotes/%s/*' % self.name
self.fetch = [RefSpec(True, 'refs/heads/*', dst)]
def Save(self):
"""Save this remote to the configuration.
"""
self._Set('url', self.url)
if self.pushUrl is not None:
self._Set('pushurl', self.pushUrl + '/' + self.projectname)
else:
self._Set('pushurl', self.pushUrl)
self._Set('review', self.review)
self._Set('projectname', self.projectname)
self._Set('fetch', list(map(str, self.fetch)))
def _Set(self, key, value):
key = 'remote.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all_keys=False):
key = 'remote.%s.%s' % (self.name, key)
return self._config.GetString(key, all_keys = all_keys)
class Branch(object):
"""Configuration options related to a single branch.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.merge = self._Get('merge')
r = self._Get('remote')
if r:
self.remote = self._config.GetRemote(r)
else:
self.remote = None
@property
def LocalMerge(self):
"""Convert the merge spec to a local name.
"""
if self.remote and self.merge:
return self.remote.ToLocal(self.merge)
return None
def Save(self):
"""Save this branch back into the configuration.
"""
if self._config.HasSection('branch', self.name):
if self.remote:
self._Set('remote', self.remote.name)
else:
self._Set('remote', None)
self._Set('merge', self.merge)
else:
with open(self._config.file, 'a') as fd:
fd.write('[branch "%s"]\n' % self.name)
if self.remote:
fd.write('\tremote = %s\n' % self.remote.name)
if self.merge:
fd.write('\tmerge = %s\n' % self.merge)
def _Set(self, key, value):
key = 'branch.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all_keys=False):
key = 'branch.%s.%s' % (self.name, key)
return self._config.GetString(key, all_keys = all_keys)
|
couchbasedeps/git-repo
|
git_config.py
|
Python
|
apache-2.0
| 21,644 | 0.014692 |
class base(object):
proc = dict
pagesize = pow(2, 12)
arch = None
# and other host-specific attributes
def __init__(self, **kwds):
pass
def __getitem__(self, processid):
return self.proc[processid]
def list(self):
pass
def create(self, executable, args, env=[], directory='.', **kwds):
pass
def attach(self, id):
pass
def detach(self, id):
pass
def terminate(self, id):
pass
#######
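# Illustrative sketch (assumption, not part of the original file): a concrete host
# implementation would subclass `base` and fill in the process-management hooks, e.g.
#
#   class localhost(base):
#       arch = 'x86_64'
#       def list(self):
#           return list(self.proc)
#       def attach(self, id):
#           self.proc[id] = object()   # placeholder for a platform-specific handle
#           return self.proc[id]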
|
arizvisa/syringe
|
lib/mpdebug/default/host.py
|
Python
|
bsd-2-clause
| 491 | 0 |
import pytest
from nex.reader import Reader, ReaderBuffer
from common import (test_not_here_file_name, test_file_name, test_chars,
test_2_chars)
def test_buffer_init():
"""Check buffer works sensibly."""
r = ReaderBuffer(test_chars)
assert r.i == -1
assert r.chars == test_chars
def test_next_char():
"""Check advancing through a file returns the correct characters."""
r = Reader()
r.insert_chars(test_chars)
cs = [r.advance_loc() for _ in range(4)]
assert cs == test_chars
with pytest.raises(EOFError):
r.advance_loc()
def test_init_missing_file():
"""Check inserting a non-existent file into a reader raises an error."""
r = Reader()
with pytest.raises(IOError):
r.insert_file(test_not_here_file_name)
def test_init_file():
"""Check inserting a non-existent file into a reader raises an error."""
r_direct = Reader()
r_direct.insert_chars(test_chars)
r_file = Reader()
r_file.insert_file(test_file_name)
assert list(r_direct.advance_to_end()) == list(r_file.advance_to_end())
def test_insert_start():
"""Check inserting a new file at the start reads from the second, then the
first."""
r = Reader()
r.insert_chars(test_chars)
r.insert_chars(test_2_chars)
assert list(r.advance_to_end()) == test_2_chars + test_chars
def test_insert_middle():
"""Check inserting a new file halfway through reading a first, reads part
of one, then the second, then the rest of the first."""
r = Reader()
r.insert_chars(test_chars)
cs = [r.advance_loc()]
r.insert_chars(test_2_chars)
cs.extend(list(r.advance_to_end()))
assert cs == ['a', 'd', 'e', 'f', '\n', 'b', 'c', '\n']
def test_insert_end():
"""Check inserting a new file after reading a first, reads the first then the second."""
r = Reader()
r.insert_chars(test_chars)
cs = list(r.advance_to_end())
r.insert_chars(test_2_chars)
cs.extend(list(r.advance_to_end()))
assert cs == test_chars + test_2_chars
def test_peek():
"""Test various errors and constraints on peeking."""
r = Reader()
r.insert_chars(test_chars)
# Can't peek at start of buffer
with pytest.raises(ValueError):
r.peek_ahead(n=0)
r.advance_loc()
assert r.current_char == 'a'
# Can't peek backwards, (especially because this would be end of buffer).
with pytest.raises(ValueError):
r.peek_ahead(n=-1)
# Valid peeks.
assert [r.peek_ahead(n=i) for i in range(4)] == test_chars
# Can't peek too far ahead.
with pytest.raises(ValueError):
r.peek_ahead(n=4)
r.advance_loc()
assert r.current_char == 'b'
# Can't peek past end of buffer.
with pytest.raises(EOFError):
r.peek_ahead(n=3)
def test_advance():
"""Test advancing through the reader on one buffer."""
r = Reader()
r.insert_chars(test_chars)
cs = []
for _ in range(4):
r.advance_loc()
cs.append(r.peek_ahead(0))
assert cs == test_chars
# TODO: Line and column numbering.
# TODO: Peeking and advancing on buffers.
|
eddiejessup/nex
|
tests/test_reader.py
|
Python
|
mit
| 3,129 | 0.00032 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 10:46:50 2010
@author: bosmanoglu
InSAR module. Includes functions for analyzing SAR interferometry with python.
"""
from numpy import *
from pylab import *
from basic import *
import scipy
from scipy import ndimage #scipy.pkgload('ndimage')
from scipy import signal #scipy.pkgload('signal') #ndimage
from scipy import interpolate #scipy.pkgload('interpolate'); #interp1d,RectBivariateSpline
from scipy import constants #scipy.pkgload('scipy.constants')
from scipy import optimize #scipy.pkgload('optimize')
from scipy import stats
import time_series
import pdb
try:
import stack
from cutting_edge import *
except ImportError:
pass
def coh2snr(coh):
return coh/(1.-coh);
def snr2coh(snr):
return snr/(snr+1.);
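# Worked example: a coherence of 0.8 gives an SNR of 0.8/(1-0.8) = 4,
# and snr2coh(4) = 4/(4+1) = 0.8 recovers it.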
def coh2pdf(coh,n=100):
domain=linspace(-pi,pi,n);
pdf=(1-coh**2)/(2*pi) \
/ (1-coh**2 * cos(domain)**2) \
* (1 \
+ (coh*cos(domain)*arccos(-1*coh*cos(domain))) \
/ sqrt(1-coh**2*cos(domain)**2) \
)
return pdf
def coh2pdfML(coh,L,n=100,domain=None):
"""coh2pdfML(coh,L,n=100,domain=None)
coh: scalar or vector.
L= scalar, multilook factor
n=100, number of samples in domain [-pi,pi]
domain=vector or [#coh, n] . user specified domains. First axis has to be the same as size(coh).
"""
import scipy
from scipy import special #scipy.pkgload('special')
G=scipy.special.gamma #math.gamma #returns the gamma function value at X, same as scipy.special.gamma
F=scipy.special.hyp2f1 #returns gauss hypergeometric function
if domain is None:
domain=linspace(-pi,pi,n);
if domain.shape[0] == coh.size:
#user specified domain. Should be the same number of elements with coh:
#ccd=dot(atleast_2d(coh), atleast_2d(cos(domain)))
coh=tile(coh, (domain.shape[1],1)).T
ccd=coh*cos(domain);
else:
ccd=dot(atleast_2d(coh).T, atleast_2d(cos(domain))) #Coherence Cos Domain
coh=tile(coh, (domain.shape[0],1)).T
pdf=(1-coh**2)**L/(2*pi) \
* F(L, 1, 0.5,ccd**2) \
+ (G(L+0.5)*(1-coh**2)**L * ccd) \
/ (2*sqrt(pi) * G(L) * (1-ccd**2)**(L+0.5))
return pdf
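# Sanity-check sketch (illustration only): the multilook phase PDF should integrate
# to ~1 over [-pi, pi], e.g.
#   d = linspace(-pi, pi, 200)
#   trapz(coh2pdfML(array([0.6]), 5, n=200), d)   # ~ array([1.0])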
def coh2stdpha(coh,n=100,lut=None):
'''coh2stdpha(coh,n=100,lut=None)
n:number of samples between -pi +pi
lut: number of samples in look-up-table
ex:
stdpha=coh2stdpha(coh)
stdpha=coh2stdpha(coh,lut=100); #This is much faster but only accurate to 1/100th of coh.max()=1 and coh.min()=0.
'''
if isinstance(coh,list):
coh=array(coh)
elif isinstance(coh,float):
coh=array([coh])
domain=linspace(-pi,pi,n);
dims=coh.shape
stdpha=zeros(dims)
if lut is None:
for k in xrange(size(coh)):#r_[0:size(coh)]:
#numpy.trapz(Y,X) = matlab.trapz(X,Y)
idx=unravel_index(k, dims)
stdpha[idx]=sqrt(trapz(domain**2*coh2pdf(coh[idx],n),domain));
else:
lutx=linspace(coh.min(), coh.max(), lut); #lutx=look up table x
luty=zeros(lutx.shape); # luty=look up table y
for k in xrange(len(lutx)):
luty[k]=sqrt(trapz(domain**2*coh2pdf(lutx[k],n),domain));
lutf=scipy.interpolate.interp1d(lutx,luty, 'linear')
stdpha=lutf(coh)
return stdpha
def coh2stdphaML(coh,L,n=100,lut=None):
'''coh2stdpha(coh,L,n=100,lut=None)
n:number of samples between -pi +pi
lut: number of samples in look-up-table
ex:
L=iobj.coherence.Multilookfactor_azimuth_direction * iobj.coherence.Multilookfactor_range_direction
stdpha=coh2stdpha(coh,L)
stdpha=coh2stdpha(coh,L,lut=100); #This is much faster but only accurate to 1/100th of coh.max()=1 and coh.min()=0.
'''
if isinstance(coh,list):
coh=array(coh)
elif isinstance(coh, number):
coh=array([coh])
#elif isinstance(coh,float):
# coh=array([coh])
domain=linspace(-pi,pi,n);
dims=coh.shape
stdpha=zeros(dims)
if lut is None:
for k in xrange(size(coh)):#r_[0:size(coh)]:
#numpy.trapz(Y,X) = matlab.trapz(X,Y)
idx=unravel_index(k, dims)
stdpha[idx]=sqrt(trapz(domain**2*coh2pdfML(coh[idx],L,n),domain));
else:
lutx=linspace(coh.min(), coh.max(), lut); #lutx=look up table x
luty=zeros(lutx.shape); # luty=look up table y
for k in xrange(len(lutx)):
luty[k]=sqrt(trapz(domain**2*coh2pdfML(lutx[k],L,n),domain));
lutf=scipy.interpolate.interp1d(lutx,luty, 'linear')
stdpha=lutf(coh)
return stdpha
def stdpha2coh(stdpha, L=1, n=100, lut=100):
'''stdpha2cohML(stdpha, L=1, n=100, lut=100):
Creates a lookup table for coherence to stdpha and uses it to reverse the relation
'''
if isinstance(stdpha,list):
stdpha=array(stdpha)
elif isinstance(stdpha, number):
stdpha=array([stdpha])
domain=linspace(-pi,pi,n);
lutx=linspace(0.01, 0.99, lut); #lutx=look up table x
luty=zeros(lutx.shape); # luty=look up table y
for k in xrange(len(lutx)):
luty[k]=sqrt(trapz(domain**2*coh2pdfML(lutx[k],L,n),domain));
lutf=scipy.interpolate.interp1d(flipud(luty),flipud(lutx), 'linear', bounds_error=False)
coh=lutf(stdpha);
coh[stdpha > luty.max() ]=0.01;
coh[stdpha < luty.min() ]=0.99;
return coh
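# Note (illustration): stdpha2coh is the approximate inverse of coh2stdphaML, so
# stdpha2coh(coh2stdphaML(0.7, 4), L=4) should come back close to 0.7 (limited by
# the 100-point look-up tables used on both sides).

# gradient_coherence below estimates coherence from the (optionally low-pass
# filtered) complex gradients of master and slave rather than from the images
# themselves.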
def gradient_coherence(m,s=None, w=(5,5), low_pass=True):
if any(iscomplexobj(m)):
mg0,mg1=cpxgradient(m)
else:
mg0,mg1=gradient(m)
if s is None:
s=empty(m.shape, dtype=complex);
s[:]=1.+0.j
if any(iscomplexobj(s)):
sg0,sg1=cpxgradient(s)
else:
sg0,sg1=gradient(s)
if low_pass is True:
mg0=scipy.ndimage.generic_filter(mg0, mean, size=w)
mg1=scipy.ndimage.generic_filter(mg1, mean, size=w)
sg0=scipy.ndimage.generic_filter(sg0, mean, size=w)
sg1=scipy.ndimage.generic_filter(sg1, mean, size=w)
#pdb.set_trace()
return coherence(mg0+1j*mg1, sg0+1j*sg1, w=w)
def coherence(m,s=None,w=(5,5)):
'''coherence(master, slave=None, window):
input is master and slave complex images (tested for 1D only)
w is the calculation window.
'''
coh=zeros(size(m))
corrFilter= ones(w)
nfilt=corrFilter.size
corrFilter=corrFilter/nfilt
# Em=scipy.ndimage.filters.correlate(m*conj(m),corrFilter,mode='nearest')
# Es=scipy.ndimage.filters.correlate(s*conj(s),corrFilter,mode='nearest')
# Ems=scipy.ndimage.filters.correlate(m*conj(s),corrFilter,mode='nearest')
if s is None:
s=empty(m.shape, dtype=complex)
s[:]=exp(1.j*0);
Em=scipy.signal.signaltools.correlate(m*conj(m), corrFilter, mode='same')
Es=scipy.signal.signaltools.correlate(s*conj(s), corrFilter, mode='same')
Ems=scipy.signal.signaltools.correlate(m*conj(s), corrFilter, mode='same')
coh=abs(Ems / (sqrt(Em**2+Es**2)/sqrt(2))) #need to divide by two to get root mean square
# for k in r_[0:len(m)]:
# if k+w>=len(m):
# a=k+w-len(m)+1
# else:
# a=0
# mw=m[k-a:k+w]
# sw=s[k-a:k+w]
# coh[k]=mean(mw*conj(sw))/sqrt(mean(mw*conj(mw))*mean(sw*conj(sw)))
return coh
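# Illustrative sketch: coherence() can be used to estimate how well a noisy copy of a
# wrapped phase field matches the original, e.g.
#   noisy = interf + 0.5 * randn(*interf.shape)
#   coh = coherence(exp(1j * interf), exp(1j * noisy), w=(5, 5))
# (siminterf below uses the same idea; see the commented-out call near its end).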
def crosscorrelate(m,s):
"""crosscorrelation(m,s):
"""
coh=zeros(size(m))
#corrFilter= ones(m.shape)
#nfilt=corrFilter.size
#corrFilter=corrFilter/nfilt
#m=rescale(m, [-1,1]);
#m=m-m.mean()
#s=rescale(s, [-1,1]);
#s=s-s.mean()
Em=(m*m.conj()).mean() # Em=(m**2.).sum()
Es=(s*s.conj()).mean() # Es=(s**2.).sum()
Ems=(m*s.conj()).mean() # Ems=(m*s).sum()
#Em=scipy.signal.signaltools.correlate(m*m, corrFilter, mode='same')
#Es=scipy.signal.signaltools.correlate(s*s, corrFilter, mode='same')
#Ems=scipy.signal.signaltools.correlate(m*s, corrFilter, mode='same')
coh=abs(Ems / sqrt(Em*Es))#1.4142135623730949#(2./sqrt(2.))
return coh
def correlate(m,s,w):
coh=zeros(m.shape)
w0=int(w[0]/2.)
w1=int(w[1]/2.)
for k in xrange(m.shape[0]):
for l in xrange(m.shape[1]):
if k<w0:
kk=r_[0:k+w0];
elif k>m.shape[0]-w0:
kk=r_[k-w0:m.shape[0]]
else:
kk=r_[k-w0:k+w0]
if l<w1:
ll=r_[0:l+w1];
elif l>m.shape[1]-w1:
ll=r_[l-w1:m.shape[1]]
else:
ll=r_[l-w1:l+w1]
K,L=meshgrid(kk,ll)
coh[k,l]=crosscorrelate(m[K,L],s[K,L])
#coh[k,l]=abs(scipy.stats.pearsonr(m[K,L].ravel(),s[K,L].ravel())[0]);
return coh
def readComplexData(fname, width, length=0, dtype=float):
    """Read interleaved real/imaginary samples from a flat binary file into a
    (length, 2*width) array; length is derived from the file size when not given."""
    if length == 0:
        filesize = os.path.getsize(fname)
        length = float(filesize) / width / 2
        if not isint(length):
            print("Error with file width, will continue but results might be bad.")
    # rows are lines, columns are the interleaved real/imag samples of each line
    data = fromfile(fname, dtype, int(width * 2 * length)).reshape(int(length), 2 * width)
    return data
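# ipd: integrate the wrapped phase differences of a 1-D complex signal, i.e. a simple
# 1-D phase unwrapping obtained by cumulatively summing the phase increments.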
def ipd(x):
return angle(hstack([0, x[1:]*x[0:-1].conj()])).cumsum()
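# ipg: weighted estimate of the unwrapped phase at pixel cintP from its neighbours'
# complex values (cintNei) and already-unwrapped phases (unwNei); presumably a
# building block for region-growing phase unwrapping.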
def ipg(cintP, cintNei, unwNei, weiNei=None):
if weiNei is None:
weiNei=ones(size(cintNei));
return sum(weiNei*(unwNei-angle(cintNei*conj(cintP))))/sum(weiNei);
def radarcode_dem(dem, alpha=0.1745, theta=0.3316, R1=830000., dx=80. ):
"""radarcoded_DEM=radarcode_dem(dem, alpha=0.1745, theta=0.3316, R1=830000., dx=80. )
calculates a 1m 1rad bperp and runs siminterf without any noise.
"""
#based on SIMINTERF.m which was
# Created by Bert Kampes 05-Oct-2000
# Tested by Erik Steenbergen
#initialize output
numlines=dem.shape[0]
numpixels=dem.shape[1]
rdem=zeros([numlines,numpixels]);
# Some variables for ERS1/2 and Envisat
#alpha=deg2rad(10.); #[rad] baseline orientation
#wavelen = 0.05666; #[m] wavelength
#theta = deg2rad(19.) #[rad] looking angle to first pixel
#R1 = 830000. #[m] range to first point
#pi4divlam = (-4.*pi)/wavelen #lam(lambda)=wavelen, can't use lambda in python it is a registered command.
#dx = 80 #[m] dem resolution
#Radarcode DEM by orbit information
print ('Radarcoding DEM')
numpixelsdem=dem.shape[1]
x0=sin(theta) * R1 #x coord. of first DEM point
sat1_x=0.
sat1_y=cos(theta) * R1 + dem[1,1]
maxrange = sqrt((x0+(numpixelsdem-1)*dx)**2+sat1_y**2)-dem.max();
R1extra = R1+dem.max();
totalrange = maxrange-R1extra;
rangebinsize = totalrange/numpixels;
rangegrid = arange(R1extra,maxrange,rangebinsize)-rangebinsize;
x = arange(x0,x0+dx*(numpixelsdem),dx);# x coord. w.r.t. sat1
xsqr = x**2;
#compute range for all lines of the dem
for az in range(0,dem.shape[0]):
y = sat1_y-dem[az,:]
range2master = sqrt(y**2+xsqr)
## Interpolate p to grid rangebins
## range is not always increasing due to foreshortning
sortindex = argsort(range2master);
range2master = range2master[sortindex]
rdem[az,:]=interp(rangegrid,range2master,dem[az,:]);
return rdem
def siminterf(dem,Bperp=100,doNoise=1,waterHeight=None,alpha=0.1745, \
wavelen=0.05666, theta=0.3316, R1=830000., dx=80., Bpar=None, defoRate=None, Btemp=None, coh='Geometric',
temporal_decorrelation_factor=3e-4*365.):
'''[interf,coh,h2ph,refpha]=siminterf(dem,Bperp=100,doNoise=1,waterHeight=None,alpha=0.1745, \
wavelen=0.05666, theta=0.3316, R1=830000, dx=80):
    DEPRECATED: doNoise can be 1 or 0. If zero, Gaussian noise is not added (use coh=None instead).
if Bpar is given, alpha is calculated based on Bpar and Bperp. See Radar Interferometry pg.117, by R. Hanssen.
coh=[None|'Geometric'|'Geometric+Temporal'|float|array]
If None, no additional noise is added.
Geometric: Based on critical perpendicular baseline (simnoise)
Temporal: Based on temporal baseline (see temporal_decorrelation_factor)
float: Single coherence value for all interferogram
array: Apply given coherence.
temporal_decorrelation_factor=3e-4*365 for btemp in years: exp(-TDF * Btemp) e.g. TDF=3e-4 for btemp in days (See Simulation of timeseries surface deformation by C.W. Lee et al., 2012)
'''
#based on SIMINTERF.m which was
# Created by Bert Kampes 05-Oct-2000
# Tested by Erik Steenbergen
#initialize output
numlines=dem.shape[0]
numpixels=dem.shape[1]
interf=zeros([numlines,numpixels]);
slope =zeros([numlines,numpixels]);
h2ph =ones([numlines,numpixels]);
refpha=ones([numlines,numpixels]);
# Some variables for ERS1/2 and Envisat
#alpha=deg2rad(10.); #[rad] baseline orientation
#wavelen = 0.05666; #[m] wavelength
#theta = deg2rad(19.) #[rad] looking angle to first pixel
#R1 = 830000. #[m] range to first point
pi4divlam = (-4.*pi)/wavelen #lam(lambda)=wavelen, can't use lambda in python it is a registered command.
#dx = 80 #[m] dem resolution
#Radarcode DEM by orbit information
print ('Radarcoding DEM')
numpixelsdem=dem.shape[1]
x0=sin(theta) * R1 #x coord. of first DEM point
sat1_x=0.
sat1_y=cos(theta) * R1 + dem[1,1]
maxrange = sqrt((x0+(numpixelsdem-1)*dx)**2+sat1_y**2)-dem.max();
R1extra = R1+dem.max();
totalrange = maxrange-R1extra;
rangebinsize = totalrange/numpixels;
rangegrid = arange(R1extra,maxrange,rangebinsize)-rangebinsize;
#compute range diff to slave satellite
#B = Bperp / cos(theta-alpha);
#batu - bpar
    if Bpar is not None:
        alpha = theta - arctan2(Bpar, Bperp);
        B = sqrt(Bpar**2.+Bperp**2.); #Bpar / sin(theta-alpha);
        print('alpha: %s' % alpha)
    else:
        B = Bperp / cos(theta-alpha);
        Bpar = B * sin(theta - alpha);
        print('Bpar: %s' % Bpar)
#end bpar
sat2_x = B * cos(alpha);
sat2_y = B * sin(alpha) + sat1_y;
x = arange(x0,x0+dx*(numpixelsdem),dx);# x coord. w.r.t. sat1
x2sqr = (x - sat2_x)**2;
xsqr = x**2;
#compute range for all lines of the dem
for az in range(0,dem.shape[0]):
y = sat1_y-dem[az,:]
range2master = sqrt(y**2+xsqr)
y2 = sat2_y-dem[az,:]
range2slave = sqrt(y2**2+x2sqr)
phase = pi4divlam * (range2slave-range2master);
# remove reference phase
tantheta = x/y2
deltax = dem[az,:] / tantheta # far field approx
x2_0 = x - deltax
refpharangemaster = sqrt(sat1_y**2 + x2_0**2)
refpharangeslave = sqrt(sat2_y**2 + (x2_0-sat2_x)**2)
refphase = pi4divlam * (refpharangeslave-refpharangemaster);
refpha[az,:]=refphase;
phase = phase - refphase;
## Interpolate p to grid rangebins
## range is not always increasing due to foreshortning
sortindex = argsort(range2master);
range2master = range2master[sortindex]
phase = phase[sortindex];
interf[az,:]=interp(rangegrid,range2master,phase);
## calculate slope and simulate noise
slopedem= arctan2(diff(dem[az,:]),dx)
slopedem= hstack((slopedem, [0]))
slopedem= slopedem[sortindex]
slope[az,:]=interp(rangegrid,range2master,slopedem);
h2ph[az,:] = -pi4divlam*Bperp/(range2master*sin(theta));
noise=zeros(interf.shape)
if doNoise==1 and coh is None:
print("DEPRECATED. Use coh instead.")
coh="Geometric"
if coh is not None:
if "Geometric" in coh:
noiseCoherence=simnoise(slope, Bperp)
noise = noiseCoherence[0];
#coh = noiseCoherence[1];
if "Temporal" in coh and temporal_decorrelation_factor is not None:
temporal_coh=exp(-temporal_decorrelation_factor*Btemp)
noise=noise+random.randn(*interf.shape)*coh2stdpha(temporal_coh, 20)
if defoRate is not None: # Deformation is always included Coherence if specified.
noise=noise+ (pi4divlam*defoRate*Btemp)
        if isfloat(coh) and size(coh)==1: #isfloat=basic.isfloat; size() also works for plain Python floats
stdphase=coh2stdpha(coh, 20); # This calculation is based on simnoise.
noise=random.randn(*interf.shape) * stdphase
if isarray(coh) and coh.shape==interf.shape:
stdphase=coh2stdpha(coh, 20); # This calculation is based on simnoise.
noise=random.randn(*coh.shape) * stdphase
#noiseCoherence=simnoise(slope, Bperp, Bw=15550000.,wavelen=wavelen, theta=theta, R1=R1)
#noise = noiseCoherence[0];
#coh = noiseCoherence[1];
#if doNoise==1:
#coh=coherence(exp(-1j*interf), exp(-1j*(interf+noise)), [3,3]) # This overwrites coherence based on the actual noise applied. Should be close to input coherence???
interf= interf + noise # This also adds the deformation signal.
coh = stdpha2coh(moving_window(interf, func=std))
#if defoRate is not None:
# interf= interf+ (-pi4divlam*defoRate*Btemp)
    if waterHeight is not None:
waterMask=(dem<waterHeight);
putmask(interf,waterMask,2*pi*randn(sum(waterMask)));
putmask(coh,waterMask,0.05*abs(randn(sum(waterMask))))
return [interf,coh,h2ph,refpha]
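# A minimal usage sketch for siminterf (assumption: the synthetic Gaussian-hill DEM and
# the chosen baseline are illustrative only, not taken from any real scene).
def _example_siminterf():
    """Simulate an interferogram over a synthetic DEM with a single Gaussian hill."""
    import numpy as np
    az, rg = np.mgrid[0:128, 0:128]
    dem = 500.0 * np.exp(-((az - 64.0)**2 + (rg - 64.0)**2) / (2.0 * 20.0**2))
    interf, coh, h2ph_factors, refpha = siminterf(dem, Bperp=150., coh='Geometric')
    print('interferogram shape:', interf.shape)
    print('mean coherence:', coh.mean())
    return interf, coh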
def simnoise(slope,Bperp,Bw=15550000.,wavelen=0.05666, theta=0.3316, R1=830000.):
"""simnoise(slope,Bperp,Bw=15550000.,wavelen=0.05666, theta=0.3316, R1=830000.):
Bw=range Band width [Hz]
wavelen = [m]
theta = look angle [rad]
R1= range to first pixel [m]
This function calculates the geometric coherence and related noise level
based on the ERS1/2 configuration (range bandwith, wavelength, look angle,
satellite altitude).
"""
# Some variables for ERS1/2 and Envisat
#Bw = 15550000; #[Hz] range bandwidth
#alpha=deg2rad(10.); #[rad] baseline orientation
#wavelen = 0.05666; #[m] wavelength
#theta = deg2rad(19.) #[rad] looking angle to first pixel
#R1 = 830000. #[m] range to first point
#pi4divlam = (-4.*pi)/wavelen #lam(lambda)=wavelen, can't use lambda in python it is a registered command.
#dx = 80 #[m] dem resolution
c = scipy.constants.c; #[m/s] speed of light
#critical baseline
Bcritical = wavelen*(Bw/c)*R1*tan(theta-slope);
gammageom = abs((Bcritical-abs(Bperp))/Bcritical); #Batu: 20181228 - Bperp<0 was causing nan otherwise.
gammageom[isnan(gammageom)]=0
stdphase=coh2stdpha(gammageom,20)
#r = random.randn(*gammageom.shape)
noise = random.randn(*gammageom.shape) * stdphase
#gammageom = gammageom*(1-gammageom)*abs(r)
return [noise, gammageom]
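# A short sketch of calling simnoise on its own (assumption: a zero-slope scene and the
# ERS-like defaults are used purely to show the outputs).
def _example_simnoise():
    """Geometric coherence and phase noise for flat terrain at Bperp=150 m."""
    import numpy as np
    slope = np.zeros((64, 64))                # terrain slope in radians
    noise, gammageom = simnoise(slope, Bperp=150.)
    print('mean geometric coherence:', gammageom.mean())
    print('phase noise std [rad]:', noise.std())
    return noise, gammageom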
def phaseDerivativeVariance(p):
"""phaseDerivativeVariance(phase)
This function calculates the derivative variance for the given complex phase
data. This function is based on Bruce Spottiswoode 2008 PhaseDerivativeVariance.m
file. This function is re-written based on Ghiglia and Pritt,
'Two dimensional phase unwrapping', 1998, p.76
"""
#calculate dr (range)
dims=p.shape
dr=zeros(dims)
#first row
dr[:,0]=angle(p[:,0]*conj(p[:,1]))
dr[:,-1]=angle(p[:,-2]*conj(p[:,-1]))
for r in r_[1:dims[1]-1]:
dr[:,r]=angle(p[:,r-1]*conj(p[:,r]))
nfilt=9.0
corrFilter= array([[1,1,1],[1,1,1],[1,1,1]])/nfilt #http://docs.scipy.org/doc/scipy-0.7.x/reference/tutorial/ndimage.html
mean_dr=scipy.ndimage.filters.correlate(dr,corrFilter,mode='nearest')
var_dr=scipy.ndimage.filters.correlate((dr-mean_dr)**2,corrFilter,mode='nearest')
#calculate da (azimuth), dy in spottiswoode
da=zeros(dims)
da[0,:]=angle(p[0,:]*conj(p[1,:]))
da[-1,:]=angle(p[-2,:]*conj(p[-1,:]))
for a in r_[1:dims[0]-1]:
da[a,:]=angle(p[a-1,:]*conj(p[a,:]))
mean_da=scipy.ndimage.filters.correlate(da,corrFilter,mode='nearest')
var_da=scipy.ndimage.filters.correlate((da-mean_da)**2,corrFilter,mode='nearest')
var=sqrt(var_da)+sqrt(var_dr)
return var
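# Hedged illustration: the derivative variance of a smooth complex phase ramp should be
# much lower than that of the same ramp with added phase noise (noise level is arbitrary).
def _example_phase_derivative_variance():
    import numpy as np
    yy, xx = np.mgrid[0:64, 0:64]
    ramp = np.exp(1j * 0.1 * xx)                                  # clean phase ramp
    noisy = ramp * np.exp(1j * 0.5 * np.random.randn(64, 64))     # ramp + phase noise
    print('clean :', phaseDerivativeVariance(ramp).mean())
    print('noisy :', phaseDerivativeVariance(noisy).mean())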
def phaseDerivativeVarianceReal(p):
"""phaseDerivativeVarianceReal(2dArray)
This function calculates the derivative variance for the given complex phase
data. This function is based on Bruce Spottiswoode 2008 PhaseDerivativeVariance.m
file. This function is re-written based on Ghiglia and Pritt,
'Two dimensional phase unwrapping', 1998, p.76
"""
#calculate dr (range)
dims=p.shape
dr=np.zeros(dims)
#first row
dr[:,0]=p[:,0]-p[:,1]
dr[:,-1]=p[:,-2]-p[:,-1]
for r in np.r_[1:dims[1]-1]:
dr[:,r]=p[:,r-1]-p[:,r]
nfilt=9.0
corrFilter=np.array([[1,1,1],[1,1,1],[1,1,1]])/nfilt #http://docs.scipy.org/doc/scipy-0.7.x/reference/tutorial/ndimage.html
mean_dr=scipy.ndimage.filters.correlate(dr,corrFilter,mode='nearest')
var_dr=scipy.ndimage.filters.correlate((dr-mean_dr)**2,corrFilter,mode='nearest')
#calculate da (azimuth), dy in spottiswoode
da=np.zeros(dims)
da[0,:]=p[0,:]-p[1,:]
da[-1,:]=p[-2,:]-p[-1,:]
for a in np.r_[1:dims[0]-1]:
da[a,:]=p[a-1,:]-p[a,:]
mean_da=scipy.ndimage.filters.correlate(da,corrFilter,mode='nearest')
var_da=scipy.ndimage.filters.correlate((da-mean_da)**2,corrFilter,mode='nearest')
return np.sqrt(var_da+var_dr)
def cpxgradient(cpx):
out=[];
    for k in range(cpx.ndim):
cpx=rollaxis(cpx,k,0)
d=zeros(cpx.shape)
d[0:-1,:]=angle(cpx[1:,:]*conj(cpx[0:-1,:]))
d[1:,:]=d[1:,:]+d[0:-1,:]
d[1:,:]=0.5*d[1:,:]
out.append(rollaxis(d,k,0))
return out;
def multilook(x,ratio):
"""multilook(data,ratio)
data: is a numpy array.
ratio: is a list of ratios with number of elements equal to number of data dimensions.
CURRENTLY only 2D data is SUPPORTED.
"""
#http://lists.ipython.scipy.org/pipermail/numpy-discussion/2010-July/051760.html
#l=0;
L=x.shape[0];
#p=0;
P=x.shape[1];
    outL=int(floor(float(L)/ratio[0]))
    outP=int(floor(float(P)/ratio[1]))
x=x[0:ratio[0]*outL,0:ratio[1]*outP]
out=x.reshape(outL,ratio[0],outP,ratio[1]);
return out.mean(axis=3).mean(axis=1);
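# Quick sketch: multilooking a 100x100 array by 5 looks in azimuth and 2 in range yields
# a 20x50 average (random data is used only to show the shapes).
def _example_multilook():
    import numpy as np
    data = np.random.rand(100, 100)
    ml = multilook(data, [5, 2])
    print('multilooked shape:', ml.shape)    # expected: (20, 50)
    return ml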
def oversample(data,ratio, method='quick', shape=None):
"""oversample(data,ratio, method='quick', shape=None)
data: is a numpy array.
ratio: is a list of ratios with number of elements equal to number of data dimensions.
method={'quick','linear', 'nearest', 'cubic'}
CURRENTLY only 2D data is SUPPORTED.
"""
includesNan=False
if any(np.isnan(data)):
m=np.isnan(data);
z=data.copy();
z[m]=0;
includesNan=True
    else:
        z=data
        m=np.zeros(z.shape, dtype=bool)  # empty mask so the griddata branch below also works without NaNs
x=np.r_[0:z.shape[0]];
y=np.r_[0:z.shape[1]];
if shape is None:
X=np.linspace(0.,z.shape[0]-1,z.shape[0]*ratio[0])
Y=np.linspace(0.,z.shape[1]-1,z.shape[1]*ratio[1])
else:
X=np.linspace(0.,z.shape[0]-1,shape[0])
Y=np.linspace(0.,z.shape[1]-1,shape[1])
if method == "quick":
spl=scipy.interpolate.RectBivariateSpline(x,y,z)
zo=spl(X,Y);
else:
y,x=np.meshgrid(y,x)
Y,X=np.meshgrid(Y,X)
zo=scipy.interpolate.griddata((x[~m],y[~m]),z[~m], (X,Y), method=method)
if (includesNan) & (method == "quick"):
splm=scipy.interpolate.RectBivariateSpline(x,y,m);
mo=splm(X,Y)
mo[mo>0.5]=True
mo[mo<0.5]=False
#print int( np.ceil(np.sqrt(zo.shape[0]/z.shape[0]*zo.shape[1]/z.shape[1])) +3)
mo=scipy.ndimage.binary_dilation(mo, iterations=int( np.ceil(np.sqrt(zo.shape[0]/z.shape[0]*zo.shape[1]/z.shape[1])) +3) );
zo[mo.astype(np.bool)]=np.nan
return zo
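# Quick sketch: doubling the sampling of a smooth 20x30 surface with the default 'quick'
# spline method (the input surface is an arbitrary smooth example).
def _example_oversample():
    import numpy as np
    z = np.outer(np.sin(np.linspace(0, np.pi, 20)), np.cos(np.linspace(0, np.pi, 30)))
    zo = oversample(z, [2, 2])
    print('input shape:', z.shape, '-> oversampled shape:', zo.shape)
    return zo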
def rad2dist(radians, wavelength=0.056):
'''rad2dist(radians, wavelength=0.056)
    Returns distance corresponding to radians in the same unit as wavelength.
'''
return radians*(wavelength/(4*pi));
def dist2rad(distance, wavelength=0.056):
'''dist2rad(distance, wavelength=0.056):
    Returns radians corresponding to distance. Distance and wavelength have to be in the same units.
'''
return distance*4*pi/wavelength
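# Sanity-check sketch: at the default C-band wavelength (0.056 m) one full 2*pi fringe
# corresponds to half a wavelength of range change, and the two helpers invert each other.
def _example_phase_distance_roundtrip():
    import numpy as np
    d = rad2dist(2 * np.pi)                   # ~0.028 m
    print('one fringe =', d, 'm')
    print('back to radians:', dist2rad(d))    # ~6.283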
def h2ph(Bperp, wavelength=0.0566, R=830e3, theta=deg2rad(23.0), bistatic=False):
    '''h2ph(Bperp, wavelength=0.0566, R=830e3, theta=deg2rad(23.0), bistatic=False)
Height-to-phase calculation.
Bperp: Perpendicular baseline [m]
Wavelength: Radar wavelength [m]
R: range to master [m]
theta: Look-angle [rad]
'''
if bistatic:
pi4divlam=(-2.*pi)/wavelength;
else:
pi4divlam=(-4.*pi)/wavelength;
return -pi4divlam*Bperp/(R*sin(theta))
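# Sketch: the height-to-phase factor for the default (ERS-like) geometry; multiplying it
# by a height error gives the corresponding topographic phase contribution.
def _example_h2ph():
    factor = h2ph(Bperp=100.)                        # [rad/m]
    print('h2ph factor:', factor, 'rad/m')
    print('phase of a 10 m DEM error:', 10. * factor, 'rad')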
def xyz2los(inVector, projectionVector=zeros([1,3]), incidenceAngle=0, headingAngle=0 ):
'''xyz2los(inVector, projectionVector=zeros([1,3]), incidenceAngle=0, headingAngle=0 ):
'''
if all(projectionVector==0):
#Using Hanssen Radar Interferometry, page 162 Eq. 5.1.1
projectionVector=[-sin(incidenceAngle)*cos(headingAngle-1.5*pi), -sin(incidenceAngle)*sin(headingAngle-1.5*pi), cos(incidenceAngle)];#North East Up
projectionVector=atleast_2d(projectionVector);
los=dot(inVector, projectionVector.T) / sqrt(nansum((projectionVector)**2));
return los
def los2up(los, incidenceAngle=0):
'''los2up(los, incidenceAngle )
los: Line of sight deformation
incidenceAngle: radar incidence angle in radians
Returns vertical translation of LOS assuming horizontal displacement is zero.
'''
return los / cos(incidenceAngle)
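# Sketch: projecting a pure 5 cm uplift (North, East, Up) onto the line of sight and back;
# the incidence and heading angles are arbitrary illustrative values in radians.
def _example_xyz2los():
    import numpy as np
    neu = np.array([[0.0, 0.0, 0.05]])        # 5 cm uplift, no horizontal motion
    inc = np.deg2rad(23.)
    los = xyz2los(neu, incidenceAngle=inc, headingAngle=np.deg2rad(190.))
    print('LOS displacement [m]:', los)
    print('recovered vertical [m]:', los2up(los, incidenceAngle=inc))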
|
bosmanoglu/adore-doris
|
lib/python/insar/__init__.py
|
Python
|
gpl-2.0
| 25,698 | 0.033349 |
import warnings
from .std import TqdmExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
from .autonotebook import tqdm, trange
__all__ = ["tqdm", "trange"]
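# A tiny usage sketch (illustrative only, not part of the vendored module's API surface):
# the re-exported tqdm/trange pick the notebook widget when one is available and fall
# back to the plain console bar otherwise.
def _example_usage():  # pragma: no cover - illustrative only
    total = 0
    for i in trange(100, desc="demo"):
        total += i  # real work would go here; the bar renders as a widget or as text
    return total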
|
drammock/mne-python
|
mne/externals/tqdm/_tqdm/auto.py
|
Python
|
bsd-3-clause
| 231 | 0 |
"""Selenium tests."""
from .conftest import assert_regex, wait_for, wait_for_true
from selenium.common.exceptions import NoSuchElementException
from um.exercises.factories import ExerciseFactory
import pytest
import time
def test_florian_adds_a_new_exercise(browser, live_server):
# Florian wants to add a new exercise.
# He starts by opening the home page,
browser.get(live_server.url)
# and sees that it's there.
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
# He finds the "Plus" navbar menu and clicks it.
browser.find_element_by_id('navbarDropdownPlusMenu').click()
time.sleep(0.5)
    # There is the "Add new exercise" button.
browser.find_element_by_id('id_add_exercise').click()
# Next, he is presented a form to create a new exercise
wait_for(lambda: browser.find_element_by_tag_name('form'))
assert_regex(browser.current_url, '.+/new')
# He enters a simple exercise into the text area,
browser.find_element_by_id('id_text').send_keys('What is 5 + 4?')
# and clicks the submit button.
browser.find_element_by_id('submit-id-submit').click()
# Then, he gets back to the home page,
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
assert_regex(browser.current_url, '.+/')
# and the new exercise is displayed there.
assert 'What is 5 + 4?' in browser.page_source
def test_user_edits_an_exercise(browser, live_server, user):
# GIVEN an existing exercise
ex = ExerciseFactory.create(author=user)
# Florian goes to the home page and wants to edit this exercise
browser.get(live_server.url)
# and sees that it's there.
wait_for(lambda: browser.find_element_by_id(f'id_edit_{ex.id}'))
# He clicks the Edit button,
browser.find_element_by_id(f'id_edit_{ex.id}').click()
# and gets to the update form.
wait_for(lambda: browser.find_element_by_tag_name('form'))
assert_regex(browser.current_url, f'.+/{ex.id}/edit')
# He replaces the exercise text,
textarea = browser.find_element_by_id('id_text')
textarea.clear()
textarea.send_keys('This exercise isn\'t good enough. \( 5 + 4 = 9 \).')
# and clicks submit.
browser.find_element_by_id('submit-id-submit').click()
# Then, he gets back to the home page,
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
assert_regex(browser.current_url, '.+/')
# and the new text is displayed.
assert 'This exercise ' in browser.page_source
def test_anonymous_user_views_an_exercise(anon_browser, live_server):
browser = anon_browser
# GIVEN an existing exercise
ex = ExerciseFactory.create()
# Florian goes to the home page and wants to inspect the exercise,
browser.get(live_server.url)
# sees that it's there.
wait_for(lambda: browser.find_element_by_id(f'id_detail_{ex.id}'))
# He clicks the Details button,
browser.find_element_by_id(f'id_detail_{ex.id}').click()
# and gets to the detail view.
wait_for(lambda: browser.find_element_by_id('id_text'))
assert_regex(browser.current_url, f'.+/{ex.id}/')
# He clicks the `back` button.
browser.find_element_by_id('back-id-back').click()
# Then, he gets back to the home page,
assert_regex(browser.current_url, '.+/')
def test_florian_deletes_an_exercise(browser, live_server, user):
# GIVEN an existing exercise
ex = ExerciseFactory.create(author=user)
# Florian goes to the home page and wants to delete this exercise
browser.get(live_server.url)
# and sees that it's there.
wait_for(lambda: browser.find_element_by_id(f'id_detail_{ex.id}'))
# He clicks the View button,
browser.find_element_by_id(f'id_detail_{ex.id}').click()
# and gets to the detail view
wait_for(lambda: browser.find_element_by_id(f'id_delete_{ex.id}'))
assert_regex(browser.current_url, f'.+/{ex.id}/')
# He clicks the "Delete" button
browser.find_element_by_id(f'id_delete_{ex.id}').click()
# let the modal pop up
time.sleep(0.5)
# And confirms the deletion
browser.find_element_by_id('submit-id-submit').click()
# Then, he gets back to the home page,
wait_for(lambda: browser.find_element_by_id('id_add_exercise'))
assert_regex(browser.current_url, '.+/')
# and the exercise is gone.
with pytest.raises(NoSuchElementException):
browser.find_element_by_id(f'id_detail_{ex.id}')
|
FlowFX/unkenmathe.de
|
src/system_tests/test_adding_and_editing_exercises.py
|
Python
|
agpl-3.0
| 4,459 | 0.000673 |
# -*- encoding: utf-8 -*-
"""Test class for Roles UI"""
from ddt import ddt
from fauxfactory import gen_string
from nailgun import entities
from robottelo.decorators import data
from robottelo.helpers import generate_strings_list, invalid_names_list
from robottelo.test import UITestCase
from robottelo.ui.factory import make_role
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
@ddt
class Role(UITestCase):
"""Implements Roles tests from UI"""
@data(*generate_strings_list(len1=10))
def test_create_role_basic(self, name):
"""@Test: Create new role
@Feature: Role - Positive Create
@Assert: Role is created
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
@data('', ' ')
def test_negative_create_role_with_blank_name(self, name):
"""@Test: Create new role with blank and whitespace in name
@Feature: Role - Negative Create
@Assert: Role is not created
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['name_haserror']))
@data(*invalid_names_list())
def test_negative_create_role_with_too_long_names(self, name):
"""@Test: Create new role with 256 characters in name
@Feature: Role - Negative Create
@Assert: Role is not created
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['name_haserror']))
@data(*generate_strings_list(len1=10))
def test_remove_role(self, name):
"""@Test: Delete an existing role
@Feature: Role - Positive Delete
@Assert: Role is deleted
"""
with Session(self.browser) as session:
make_role(session, name=name)
self.role.delete(name)
@data(*generate_strings_list(len1=10))
def test_update_role_name(self, new_name):
"""@Test: Update role name
@Feature: Role - Positive Update
@Assert: Role is updated
"""
name = gen_string('utf8')
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
self.role.update(name, new_name)
self.assertIsNotNone(self.role.search(new_name))
def test_update_role_permission(self):
"""@Test: Update role permissions
@Feature: Role - Positive Update
@Assert: Role is updated
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
self.role.update(
name,
add_permission=True,
resource_type='Architecture',
permission_list=['view_architectures', 'create_architectures'],
)
def test_update_role_org(self):
"""@Test: Update organization under selected role
@Feature: Role - Positive Update
@Assert: Role is updated
"""
name = gen_string('alpha')
org = entities.Organization().create()
with Session(self.browser) as session:
make_role(session, name=name)
self.assertIsNotNone(self.role.search(name))
self.role.update(
name,
add_permission=True,
resource_type='Activation Keys',
permission_list=['view_activation_keys'],
organization=[org.name],
)
|
abalakh/robottelo
|
tests/foreman/ui/test_role.py
|
Python
|
gpl-3.0
| 3,848 | 0 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='model.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0bmodel.proto\"\x89\x01\n\x07Weights\x12\x12\n\nlayer_name\x18\x01 \x01(\t\x12\x13\n\x0bweight_name\x18\x02 \x01(\t\x12\r\n\x05shape\x18\x03 \x03(\r\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x12\x14\n\x0cquantize_min\x18\x06 \x01(\x02\x12\x14\n\x0cquantize_max\x18\x07 \x01(\x02\"\x80\x01\n\x05Model\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x15\n\rkeras_version\x18\x03 \x01(\t\x12\x0f\n\x07\x62\x61\x63kend\x18\x04 \x01(\t\x12\x14\n\x0cmodel_config\x18\x05 \x01(\t\x12\x1f\n\rmodel_weights\x18\x06 \x03(\x0b\x32\x08.Weightsb\x06proto3')
)
_WEIGHTS = _descriptor.Descriptor(
name='Weights',
full_name='Weights',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='layer_name', full_name='Weights.layer_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_name', full_name='Weights.weight_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shape', full_name='Weights.shape', index=2,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Weights.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='Weights.data', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantize_min', full_name='Weights.quantize_min', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantize_max', full_name='Weights.quantize_max', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=153,
)
_MODEL = _descriptor.Descriptor(
name='Model',
full_name='Model',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Model.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='Model.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keras_version', full_name='Model.keras_version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='backend', full_name='Model.backend', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_config', full_name='Model.model_config', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model_weights', full_name='Model.model_weights', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=284,
)
_MODEL.fields_by_name['model_weights'].message_type = _WEIGHTS
DESCRIPTOR.message_types_by_name['Weights'] = _WEIGHTS
DESCRIPTOR.message_types_by_name['Model'] = _MODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Weights = _reflection.GeneratedProtocolMessageType('Weights', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTS,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:Weights)
))
_sym_db.RegisterMessage(Weights)
Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), dict(
DESCRIPTOR = _MODEL,
__module__ = 'model_pb2'
# @@protoc_insertion_point(class_scope:Model)
))
_sym_db.RegisterMessage(Model)
# @@protoc_insertion_point(module_scope)
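# Illustrative usage sketch only (generated *_pb2 modules are normally left untouched;
# this just shows the messages defined above). Field names follow the descriptors; the
# concrete values are arbitrary placeholders.
def _example_roundtrip():
    w = Weights(layer_name='dense_1', weight_name='kernel',
                shape=[4, 2], type='float32', data=b'\x00' * 32)
    m = Model(id='model-0', name='example', keras_version='2.0.0', backend='tensorflow')
    m.model_weights.extend([w])
    payload = m.SerializeToString()   # wire-format bytes
    m2 = Model()
    m2.ParseFromString(payload)
    return m2.model_weights[0].layer_name  # 'dense_1'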
|
transcranial/keras-js
|
python/model_pb2.py
|
Python
|
mit
| 6,998 | 0.003144 |