| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test setting the $P4COMSTR variable.
"""
import os.path
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('Perforce', ['Perforce', 'sub'], 'sub')
sub_Perforce = os.path.join('sub', 'Perforce')
sub_SConscript = os.path.join('sub', 'SConscript')
sub_all = os.path.join('sub', 'all')
sub_ddd_in = os.path.join('sub', 'ddd.in')
sub_ddd_out = os.path.join('sub', 'ddd.out')
sub_eee_in = os.path.join('sub', 'eee.in')
sub_eee_out = os.path.join('sub', 'eee.out')
sub_fff_in = os.path.join('sub', 'fff.in')
sub_fff_out = os.path.join('sub', 'fff.out')
test.write('my-p4.py', """
import shutil
import sys
for f in sys.argv[1:]:
shutil.copy('Perforce/'+f, f)
""")
test.write('SConstruct', """
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(TOOLS = ['default', 'Perforce'],
BUILDERS={'Cat':Builder(action=cat)},
P4COM='%(_python_)s my-p4.py $TARGET',
P4COMSTR='Checking out $TARGET from our fake Perforce')
env.Cat('aaa.out', 'aaa.in')
env.Cat('bbb.out', 'bbb.in')
env.Cat('ccc.out', 'ccc.in')
env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
env.SourceCode('.', env.Perforce())
SConscript('sub/SConscript', "env")
""" % locals())
test.write(['Perforce', 'sub', 'SConscript'], """\
Import("env")
env.Cat('ddd.out', 'ddd.in')
env.Cat('eee.out', 'eee.in')
env.Cat('fff.out', 'fff.in')
env.Cat('all', ['ddd.out', 'eee.out', 'fff.out'])
""")
test.write(['Perforce', 'aaa.in'], "Perforce/aaa.in\n")
test.write('bbb.in', "checked-out bbb.in\n")
test.write(['Perforce', 'ccc.in'], "Perforce/ccc.in\n")
test.write(['Perforce', 'sub', 'ddd.in'], "Perforce/sub/ddd.in\n")
test.write(['sub', 'eee.in'], "checked-out sub/eee.in\n")
test.write(['Perforce', 'sub', 'fff.in'], "Perforce/sub/fff.in\n")
test.run(arguments = '.',
stdout = test.wrap_stdout(read_str = """\
Checking out %(sub_SConscript)s from our fake Perforce
""" % locals(),
build_str = """\
Checking out aaa.in from our fake Perforce
cat(["aaa.out"], ["aaa.in"])
cat(["bbb.out"], ["bbb.in"])
Checking out ccc.in from our fake Perforce
cat(["ccc.out"], ["ccc.in"])
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
Checking out %(sub_ddd_in)s from our fake Perforce
cat(["%(sub_ddd_out)s"], ["%(sub_ddd_in)s"])
cat(["%(sub_eee_out)s"], ["%(sub_eee_in)s"])
Checking out %(sub_fff_in)s from our fake Perforce
cat(["%(sub_fff_out)s"], ["%(sub_fff_in)s"])
cat(["%(sub_all)s"], ["%(sub_ddd_out)s", "%(sub_eee_out)s", "%(sub_fff_out)s"])
""" % locals()))
test.must_match('all',
"Perforce/aaa.in\nchecked-out bbb.in\nPerforce/ccc.in\n")
test.must_match(['sub', 'all'],
"Perforce/sub/ddd.in\nchecked-out sub/eee.in\nPerforce/sub/fff.in\n")
#
test.pass_test()
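# Illustrative note (not part of the original test): the Perforce tool builds
# its check-out action from $P4COM and uses $P4COMSTR as the display string, so
# once P4COMSTR is set only that message is echoed instead of the expanded
# my-p4.py command line; the wrap_stdout() expectations above rely on exactly that.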
| datalogics/scons | test/Perforce/P4COMSTR.py | Python | mit | 4,112 | 0.002432 |
from ..msg import die
from ..ast import *
def unexpected(item):
die("unexpected construct '%s'" % item.get('type','unknown'), item)
def parse_varuse(varuse, item):
#print "parse varuse %x: item %x: %r" % (id(varuse), id(item), item)
varuse.loc = item['loc']
varuse.name = item['name'].strip()
if item.has_key('rhs'):
varuse.rhs = parse_block(item['rhs'])
return varuse
def parse_create(item):
c = Create(loc = item['loc'],
loc_end = item['loc_end'],
label = item['lbl'],
place = parse_block(item['place']),
start = parse_block(item['start']),
limit = parse_block(item['limit']),
step = parse_block(item['step']),
block = parse_block(item['block']),
extras = parse_extras(item['extras']),
sync_type = item['sync'],
fun = parse_block(item['fun']),
body = parse_block(item['body']))
for p in item['args']:
c.args.append(parse_argparm(CreateArg(), 'arg', p))
if 'fid' in item and item['fid']:
c.fid_lvalue = parse_block(item['fid'])
if 'result' in item and item['result']:
c.result_lvalue = parse_block(item['result'])
return c
def parse_indexdecl(item):
return IndexDecl(loc = item['loc'],
indexname = item['name'].strip())
def parse_spawndecl(item):
return SpawnDecl(loc = item['loc'],
spawnname = item['name'].strip())
def parse_spawnsync(item):
return SpawnSync(loc = item['loc'],
rhs = parse_block(item['rhs']))
def parse_scope(item):
s = Scope(loc = item['loc'],
loc_end = item['loc_end'])
s += parse_block(item['body'])
return s
def parse_attr(item):
n = item['name'].strip()
del item['type']
del item['name']
for k in item:
item[k] = item[k].strip()
return Attr(name = n,
payload = item)
def parse_extras(items):
if len(items) == 0:
return None
b = Extras()
for item in items:
if isinstance(item, dict):
t = item['type']
if t == 'attr': b += parse_attr(item)
else: unexpected(item)
else:
assert isinstance(item, str)
# ignore strings
if len(b) > 0:
return b
return None
def parse_block(items):
if len(items) == 0:
return None
b = Block()
#print "new block %x (len %d)" % (id(b), len(b))
for item in items:
#print "parse block %x (len %d): item %x: %r" % (id(b), len(b), id(item), item)
if isinstance(item, dict):
t = item['type']
if t == 'indexdecl': b += parse_indexdecl(item)
elif t == 'getp': b += parse_varuse(GetP(), item)
elif t == 'setp': b += parse_varuse(SetP(), item)
elif t == 'geta': b += parse_varuse(GetA(), item)
elif t == 'seta': b += parse_varuse(SetA(), item)
elif t == 'create': b += parse_create(item)
elif t == 'break': b += parse_break(item)
elif t == 'end_thread': b += parse_end_thread(item)
elif t == 'decl_funptr': b += parse_funptrdecl(item)
elif t == 'scope': b += parse_scope(item)
elif t == 'spawndecl': b += parse_spawndecl(item)
elif t == 'spawnsync': b += parse_spawnsync(item)
else: unexpected(item)
else:
assert isinstance(item, str)
csp = item.strip(' \t')
if len(csp) > 0:
b += Opaque(item)
#print "parse block %x: item %x -- END (len %d)" % (id(b), id(item), len(b))
if len(b) > 0:
return b
return None
def parse_argparm(p, cat, item):
#print "parse argparm %x: item %x: %r" % (id(p), id(item), item)
t = item['type'].replace('_mutable','')
if not t.endswith(cat):
unexpected(item)
p.loc = item['loc']
p.type = item['type']
p.ctype = CType(items = item['ctype'])
p.name = item['name'].strip()
if item.has_key('init'):
p.init = parse_block(item['init'])
return p
def parse_break(item):
return Break(loc = item['loc'])
def parse_end_thread(item):
return EndThread(loc = item['loc'])
def parse_funptrdecl(item):
d = FunDeclPtr(loc = item['loc'],
loc_end = item['loc_end'],
name = item['name'].strip(),
extras = parse_extras(item['extras']))
for p in item['params']:
d += parse_argparm(FunParm(), 'parm', p)
return d
def parse_fundecl(item):
d = FunDecl(loc = item['loc'],
loc_end = item['loc_end'],
name = item['name'].strip(),
extras = parse_extras(item['extras']))
for p in item['params']:
d += parse_argparm(FunParm(), 'parm', p)
return d
def parse_fundef(item):
d = FunDef(loc = item['loc'],
loc_end = item['loc_end'],
name = item['name'].strip(),
extras = parse_extras(item['extras']),
body = parse_block(item['body']))
for p in item['params']:
d += parse_argparm(FunParm(), 'parm', p)
return d
def parse_program(source):
source = eval(source)
p = Program()
for item in source:
if type(item) == type({}):
t = item['type']
if t == 'decl': p += parse_fundecl(item)
elif t == 'decl_funptr': p += parse_funptrdecl(item)
elif t == 'fundef': p += parse_fundef(item)
elif t == 'scope': p += parse_scope(item)
else: unexpected(item)
else: p += Opaque(item)
return p
__all__ = ['parse_program']
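# Minimal usage sketch (illustrative, not part of the original module). The
# expected `source` string is the repr() of a list whose items are either plain
# strings (kept as Opaque nodes) or dicts carrying a 'type' key plus the fields
# read by the parse_* helpers above, e.g.:
#
#   prog = parse_program("['int x = 1;\\n']")   # a single Opaque item
#   prog = parse_program(repr([
#       {'type': 'scope', 'loc': '1:1', 'loc_end': '1:10', 'body': ['x = 1;']},
#   ]))
#
# Any dict whose 'type' is not handled above is reported via unexpected().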
| knz/slcore | slc/tools/slc/input/parse.py | Python | gpl-3.0 | 6,199 | 0.042104 |
"""The WaveBlocks Project
IOM plugin providing functions for handling various
overlap matrices of linear combinations of general
wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
import numpy as np
def add_overlaplcwp(self, parameters, timeslots=None, matrixsize=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Add storage for various overlap matrices. We can store one matrix type
per key.
========= ======
Key name Matrix
========= ======
``ov`` :math:`\langle\Upsilon | \Upsilon\rangle`
``ovkin`` :math:`\langle\Upsilon | T | \Upsilon\rangle`
``ovpot`` :math:`\langle\Upsilon | V(\underline{x}) | \Upsilon\rangle`
========= ======
Note that 'strange' errors occur if we later try to load or save
matrices for a key we did not initialise with this function.
:param parameters: A :py:class:`ParameterProvider` instance. It can
be empty and is not used at the moment.
:param timeslots: The number of time slots we need. Can be set to ``None``
to get automatically growing datasets.
:param matrixsize: The (maximal) size of each of the overlap matrices. If specified
this remains fixed for all timeslots. Can be set to ``None`` (default)
to get automatically growing datasets.
:type matrixsize: Pair of integers or ``None``.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
valid_keys = ("ov", "ovkin", "ovpot")
# Create the dataset with appropriate parameters
grp_ov = self._srf[self._prefixb + str(blockid)].create_group("overlaplcwp")
if timeslots is None:
T = 0
Ts = None
csTs = 128
else:
T = timeslots
Ts = timeslots
csTs = min(128, Ts)
if matrixsize is None:
Jr = 0
Jc = 0
Jrs = None
Jcs = None
csJrs = 128
csJcs = 128
else:
Jr, Jc = matrixsize
Jrs, Jcs = matrixsize
csJrs = min(128, Jrs)
csJcs = min(128, Jcs)
for k in key:
if k not in valid_keys:
raise ValueError("Unknown key value " + str(k))
name = k[2:]
daset_tg = grp_ov.create_dataset("timegrid" + name, (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1)
grp_ov.create_dataset("shape" + name, (T, 2), dtype=np.integer, chunks=(csTs, 2), maxshape=(Ts, 2))
grp_ov.create_dataset("overlap" + name, (T, Jr, Jc), dtype=np.complexfloating, chunks=(1, csJrs, csJcs), maxshape=(Ts, Jrs, Jcs))
daset_tg.attrs["pointer"] = 0
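# Illustrative usage sketch (not part of the original plugin). These functions
# are meant to be attached to an IOManager-style object; assuming such an
# object `iom` and a matrix size J, a typical round trip would look like:
#
#   iom.add_overlaplcwp(None, timeslots=None, matrixsize=(J, J),
#                       blockid=0, key=("ov", "ovkin"))
#   iom.save_overlaplcwp([M, Mkin], timestep=0, blockid=0, key=("ov", "ovkin"))
#   M_loaded = iom.load_overlaplcwp(timestep=0, blockid=0, key=("ov",))
#
# Only keys passed to add_overlaplcwp can be saved or loaded later.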
def delete_overlaplcwp(self, blockid=0):
r"""Remove the stored overlap matrices.
:param blockid: The ID of the data block to operate on.
"""
try:
del self._srf[self._prefixb + str(blockid) + "/overlaplcwp"]
except KeyError:
pass
def has_overlaplcwp(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Ask if the specified data block has the desired data tensor.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to check for. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
r = True
r &= ("overlaplcwp" in self._srf[self._prefixb + str(blockid)].keys())
if r and "ov" in key:
r &= ("overlap" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovpot" in key:
r &= ("overlappot" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovkin" in key:
r &= ("overlapkin" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
return r
def save_overlaplcwp(self, data, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Save overlap matrices of linear combinations of general wavepackets.
In principle this function also supports non-square matrices.
:param data: The data matrices to save.
:type data: A list of :py:class:`ndarray` entries.
:param timestep: The timestep at which we save the data.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
for item, datum in zip(key, data):
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
timeslot = self._srf[pathtg].attrs["pointer"]
# Write the data
self.must_resize(pathd, timeslot)
datum = np.atleast_2d(np.squeeze(datum))
rows, cols = datum.shape
self.must_resize(pathd, rows - 1, axis=1)
self.must_resize(pathd, cols - 1, axis=2)
self._srf[pathd][timeslot, :rows, :cols] = datum
self.must_resize(pathsh, timeslot)
self._srf[pathsh][timeslot, :] = np.array([rows, cols])
# Write the timestep to which the stored values belong into the timegrid
self.must_resize(pathtg, timeslot)
self._srf[pathtg][timeslot] = timestep
# Update the pointer
self._srf[pathtg].attrs["pointer"] += 1
def load_overlaplcwp_timegrid(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the timegrid corresponding to the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to load. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having one column.
"""
tg = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
tg.append(self._srf[pathtg][:])
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
tg.append(self._srf[pathtg][:])
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
tg.append(self._srf[pathtg][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
print(tg)
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp_shape(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the shape of the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to load. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having two columns.
"""
tg = []
for item in key:
if item == "ov":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
tg.append(self._srf[pathsh][:])
elif item == "ovkin":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
tg.append(self._srf[pathsh][:])
elif item == "ovpot":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
tg.append(self._srf[pathsh][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
print(tg)
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp(self, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load overlap matrices of linear combinations of general wavepackets.
:param timestep: Load only the data of this timestep.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to load. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` items. Their shapes depend on the
exact value of the above arguments.
"""
result = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
if timestep is not None:
index = self.find_timestep_index(pathtg, timestep)
shape = self._srf[pathsh][index, :]
datum = self._srf[pathd][index, :shape[0], :shape[1]]
else:
datum = self._srf[pathd][:, :, :]
result.append(datum)
if len(result) == 1:
return result[0]
else:
return tuple(result)
| WaveBlocks/WaveBlocksND | WaveBlocksND/IOM_plugin_overlaplcwp.py | Python | bsd-3-clause | 10,811 | 0.003515 |
#!/usr/bin/env python
# Copyright (C) 2014 Aldebaran Robotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import rospy
from naoqi_driver.naoqi_node import NaoqiNode
from dynamic_reconfigure.server import Server
from naoqi_sensors.cfg import NaoqiMicrophoneConfig
from naoqi_msgs.msg import AudioBuffer
from naoqi import ALModule, ALBroker, ALProxy
class NaoqiMic (ALModule, NaoqiNode):
def __init__(self, name):
NaoqiNode.__init__(self, name)
self.myBroker = ALBroker("pythonBroker", "0.0.0.0", 0, self.pip, self.pport)
ALModule.__init__(self, name)
self.isSubscribed = False
self.microVersion = 0
# Create a proxy to ALAudioDevice
try:
self.audioProxy = self.get_proxy("ALAudioDevice")
except Exception, e:
rospy.logerr("Error when creating proxy on ALAudioDevice:")
rospy.logerr(str(e))
exit(1)
try:
self.robotProxy = self.get_proxy("ALRobotModel")
self.microVersion = self.robotProxy._getMicrophoneConfig()
except Exception, e:
rospy.logwarn("Could not retrieve microphone version:")
rospy.logwarn(str(e))
rospy.logwarn("Microphone channel map might not be accurate.")
def returnNone():
return None
self.config = defaultdict(returnNone)
rospy.loginfo('channel = %s'%self.config['channel'])
# ROS publishers
self.pub_audio_ = rospy.Publisher('~audio_raw', AudioBuffer)
# initialize the parameter server
self.srv = Server(NaoqiMicrophoneConfig, self.reconfigure)
def reconfigure( self, new_config, level ):
"""
Reconfigure the microphones
"""
rospy.loginfo('reconfigure changed')
if self.pub_audio_.get_num_connections() == 0:
rospy.loginfo('Changes recorded but not applied as nobody is subscribed to the ROS topics.')
self.config.update(new_config)
return self.config
# check if we are already subscribed
if not self.isSubscribed:
rospy.loginfo('subscribed to audio proxy, since this is the first listener with channel = %s'%new_config['channel'])
self.audioProxy.setClientPreferences(self.getName(), new_config['frequency'], new_config['channel'], 0)
self.audioProxy.subscribe(self.getName())
self.isSubscribed = True
self.config.update(new_config)
return self.config
def run(self):
r=rospy.Rate(2)
while self.is_looping():
if self.pub_audio_.get_num_connections() == 0:
if self.isSubscribed:
rospy.loginfo('Unsubscribing from audio bridge as nobody listens to the topics.')
self.release()
continue
if not self.isSubscribed:
self.reconfigure(self.config, 0)
r.sleep()
if self.isSubscribed:
self.release()
self.myBroker.shutdown()
def release(self):
self.audioProxy.unsubscribe(self.name)
self.isSubscribed=False
def processRemote(self, nbOfInputChannels, fNbOfInputSamples, timeStamp, inputBuff):
audio_msg = AudioBuffer()
# Deal with the sound
# get data directly with the _getInputBuffer() function because inputBuff is corrupted in python
mictmp = []
for i in range (0,len(inputBuff)/2) :
mictmp.append(ord(inputBuff[2*i])+ord(inputBuff[2*i+1])*256)
# convert the 16-bit samples to signed 16-bit values
for i in range (0,len(mictmp)) :
if mictmp[i]>=32768 :
mictmp[i]=mictmp[i]-65536
if self.config['use_ros_time']:
audio_msg.header.stamp = rospy.Time.now()
else:
audio_msg.header.stamp = rospy.Time(timeStamp)
audio_msg.frequency = self.config['frequency']
if self.config['channel'] == 0:
if self.microVersion == 0:
channels = [0,2,1,4]
else:
channels = [3,5,0,2]
else:
channels = [self.config['channel']]
audio_msg.channelMap = channels
audio_msg.data = mictmp
self.pub_audio_.publish(audio_msg)
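# Illustrative helper (not part of the original node): the byte-pair arithmetic
# in processRemote() above is equivalent to unpacking little-endian signed
# 16-bit samples with the standard library.
import struct

def _unpack_s16le(buf):
    """Return the signed 16-bit little-endian samples contained in buf."""
    return list(struct.unpack('<%dh' % (len(buf) // 2), buf))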
| ArthurVal/RIDDLE_naoqi_bridge | naoqi_sensors_py/src/naoqi_sensors/naoqi_microphone.py | Python | bsd-3-clause | 4,844 | 0.007019 |
#!/usr/bin/env python
"""
voice_nav.py allows controlling a mobile base using simple speech commands.
Based on the voice_cmd_vel.py script by Michael Ferguson in the pocketsphinx ROS package.
"""
import roslib; #roslib.load_manifest('pi_speech_tutorial')
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from math import copysign
from sound_play.libsoundplay import SoundClient
class voice_cmd_vel:
def __init__(self):
self.rate = rospy.get_param("~rate", 5)
r = rospy.Rate(self.rate)
self.paused = False
self.voice = rospy.get_param("~voice", "voice_cmu_us_bdl_arctic_clunits")
self.wavepath = rospy.get_param("~wavepath", "")
# Create the sound client object
self.soundhandle = SoundClient()
rospy.sleep(1)
self.soundhandle.stopAll()
# Subscribe to the /recognizer/output topic to receive voice commands.
rospy.Subscriber('/recognizer/output', String, self.speechCb)
# A mapping from keywords to commands.
self.keywords_to_command = {'stop': ['stop', 'halt', 'abort', 'kill', 'panic', 'off', 'freeze', 'shut down', 'turn off', 'help', 'help me'],
'bye': ['bye', 'cheers', 'goodbye', 'see you', 'bye'],
'cafe' : ['cafe', 'campus', 'tea', 'coffee', 'eat'],
'hello': ['hi', 'hey', 'hello'],
'help' : ['help me', 'can help', 'help'],
'name' : ['your name', 'name'],
'wash' : ['washroom', 'toilet'],
'library' : ['library', 'book', 'borrow'],
'labs' : ['labs'],
'talk': ['talk to me?', 'really talk?', 'you talk', 'you really talk?', 'talk'],
'amazing' : ['amazing', 'wonderful'],
'psychology' : ['psychology'],
'teaching' : ['teaching', 'music'],
'engineering' : ['engineering'],
'biology' : ['biology', 'english', 'chemistry'],
'maths' : ['computing', 'mathematics'],
'geo' : ['geology', 'geography'],
'marine' : ['marine'],
'art' : ['art'],
'roland' : ['reception', 'architecture'],
'business' : ['business'],
'staff' : ['staff'],
'sports' : ['sports'],
'robots' : ['robotics', 'robots'],
'visit' : ['visit', 'to do'],
'supermarket' : ['shop', 'supermarket'],
'cashpoint' : ['cash points', 'ATM', 'cash machines'],
'day' : ['day', 'today'],
'weather' : ['weather'],
'pause': ['pause speech'],
'continue': ['continue speech']}
rospy.loginfo("Ready to receive voice commands")
# We have to keep publishing the cmd_vel message if we want the robot to keep moving.
while not rospy.is_shutdown():
r.sleep()
def get_command(self, data):
for (command, keywords) in self.keywords_to_command.iteritems():
for word in keywords:
if data.find(word) > -1:
return command
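# Note (illustrative, not in the original): matching is substring-based via
# str.find(), and some keywords ('help', 'help me') appear under more than one
# command ('stop' and 'help'), so which command wins for such phrases depends
# on Python 2 dict iteration order rather than on the order written above.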
def speechCb(self, msg):
command = self.get_command(msg.data)
rospy.loginfo("Command: " + str(command))
if command == 'pause':
self.paused = True
elif command == 'continue':
self.paused = False
if self.paused:
return
if command == 'hello':
self.soundhandle.say("Greetings!.", self.voice)
if command == 'help':
self.soundhandle.say("Ask me questions", self.voice)
if command == 'talk':
self.soundhandle.say("yes, I can", self.voice)
if command == 'bye':
self.soundhandle.say("Bye Bye", self.voice)
if command == 'weather':
self.soundhandle.say("I Don't know.", self.voice)
if command == 'supermarket':
self.soundhandle.say("The nearest supermarket is the TESCO!. ", self.voice)
if command == 'day':
self.soundhandle.say("It's tuesday!.", self.voice)
if command == 'psychology':
self.soundhandle.say("It's in link building!", self.voice)
if command == 'teaching':
self.soundhandle.say("the rolle building!.", self.voice)
if command == 'engineering':
self.soundhandle.say("That's right here!.", self.voice)
if command == 'biology':
self.soundhandle.say("It's is in the Davy building!.!", self.voice)
if command == 'maths':
self.soundhandle.say("In the babbage building!.!", self.voice)
if command == 'geo':
self.soundhandle.say("It's in the Fitzroy building!.!", self.voice)
if command == 'marine':
self.soundhandle.say("In the reynolds And the marine building.! ", self.voice)
if command == 'art':
self.soundhandle.say(" in the scott building!.!", self.voice)
if command == 'roland':
self.soundhandle.say(" in the roland levinsky building!.!", self.voice)
if command == 'business':
self.soundhandle.say("should be cookworthy building!", self.voice)
if command == 'staff':
self.soundhandle.say("In the Portland Square building!", self.voice)
if command == 'sports':
self.soundhandle.say("It's the Nancy Astor building. ", self.voice)
if command == 'robots':
self.soundhandle.say("in Smeaton's building or in Portland Square. !", self.voice)
if command == 'cashpoint':
self.soundhandle.say("There are some on the eastern exit of this building.!!", self.voice)
if command == 'visit':
self.soundhandle.say("Well, you can walk along the seashore. May be.!", self.voice)
if command == 'name':
self.soundhandle.say("charlie.", self.voice)
if command == 'amazing':
self.soundhandle.say("thank you so much.", self.voice)
if command == 'cafe':
self.soundhandle.say(" at the S U shop.", self.voice)
if command == 'wash':
self.soundhandle.say("the second floor and the third floor.", self.voice)
if command == 'library':
self.soundhandle.say("It's next to the Smeaton's building.", self.voice)
if command == 'labs':
self.soundhandle.say(" on the third floor.", self.voice)
def cleanup(self):
# When shutting down be sure to stop the robot! Publish a Twist message consisting of all zeros.
rospy.loginfo("Shutting Down..")
if __name__=="__main__":
rospy.init_node('voice_nav')
try:
voice_cmd_vel()
except:
pass
| jdekerautem/TurtleBot-Receptionist | pocketsphinx_files/notsotalkative.py | Python | mit | 6,969 | 0.033434 |
from PerfectMatchingData import *
from Face import *
from Vertex import *
from Graph import *
from VertexList import *
from Output import *
from KekuleanMethods import *
from Checkers import *
from RequiredEdgeMethods import *
from Tkinter import *
from AppInformation import *
from random import randint
import sys # needed for sys.maxint in getMinRows()
import time
import os
import shutil
import multiprocessing as mp
import threading
Break = False
BreakLoop = False
#These methods are the main drivers of the program. Some of their helper methods are also present here.
settings = {}
#function that reads in the graph returns a 2D string list of the graph
def getInput(fileName):
faceGraph = []
inputFile = open(fileName, 'r')
row = inputFile.readline()
y = 0
while len(row) > 0:
row = row.replace('\n', '')
row = row.split(" ")
for i in range(len(row)):
x = row[i]
faceGraph.append((Face(int(x), y)))
row = inputFile.readline()
y += 1
inputFile.close()
return faceGraph
def getSettings():
fileName = "settings.txt"
inputFile = open(fileName, 'r')
lineNumber = 0
minW = 0
maxW = 0
minH = 0
maxH = 0
line = inputFile.readline()
while len(line) > 0:
line = line.replace('\n', '')
settings[lineNumber] = float(line)
line = inputFile.readline()
lineNumber += 1
inputFile.close()
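# Illustrative note (inferred from createRandomGraph()/getRow() below, not part
# of the original file): settings.txt is read one number per line into
# settings[0..4], which are then used as
#   settings[0], settings[1]: min/max faces per row,
#   settings[2], settings[3]: min/max number of rows,
#   settings[4]: probability (0-1) that a candidate face is skipped.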
def resetGraph(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeGraph(root,appInfo):
root.geometry("600x400")
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,choice,selection,choiceEntry,fileName = "graph.txt"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = fileName
faceGraph = getInput(fileName)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="That is not a valid choice, please enter 'fries' or 'clars'.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distinct Kekule structures available. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayClarFries(number,structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetGraph(root,appInfo,submitGraph,graphNumberEntry,view)
#A user-entered number of graphs are generated and tested for Kekulean-ness and written to their proper text files
def randomIntoFiles():
kekuleanFile = open("Kekuleans.txt", "w")
notKekuleanFile = open("NotKekulean.txt", "w")
numK = 0
numNotK = 0
trials = int(raw_input("How many graphs would you like to create? "))
print "\n" #just to provide some visual space
t1 = time.time()
for i in range(trials):
faceGraph = createRandomConnectedGraph()
vGraph = makeVertexGraph(faceGraph)
randGraph = Graph(faceGraph, vGraph)
if isKekulean(randGraph) == True:
numK += 1
kekuleanFile.write("Graph #" + str(numK) + "\n")
kekuleanFile.write(randGraph.simpleToString() + '\n')
else:
numNotK += 1
notKekuleanFile.write("Graph #" + str(numNotK) + "\n")
notKekuleanFile.write(randGraph.simpleToString() + '\n')
#print randGraph
#print "\n"
t2 = time.time()
print "\n" + str(numK) + " Kekulean graph(s) were found.\n" + str(numNotK) + " non-Kekulean graph(s) were found."
print "Time elapsed (in seconds): " + str(t2 - t1) + "\n"
kekuleanFile.close()
notKekuleanFile.close()
#creates a random Kekulean graph, does stuff with it, and saves it to a PNG
def createRandomKekulean():
#creates a face graphs
randomFaces = createRandomGraph()
randomGraph = _createRandomKekulean()
print "There are", len(randomGraph.getVertexGraph()), "vertices"
graphs = assignMatching(randomGraph)
graphs.sort()
if len(graphs) > 0:
#save graphs as PNG file
savePNG(graphs, "graphs - Fries.png")
Graph.comparison = 'clars'
graphs.sort()
savePNG(graphs, "graphs - Clars.png")
while True:
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
while choice.lower() != 'fries' and choice.lower() != 'clars' and choice.lower() != 'quit':
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
if choice.lower() == 'clars':
Graph.comparison = 'clars'
elif choice.lower() == 'fries':
Graph.comparison = 'fries'
else:
break
graphs.sort()
graphs.reverse()
print "There are", len(graphs), "Kekulean structures"
displayGraphs(graphs)
else:
print "error - Graph is Kekulean but has no perfect matching - see error.txt for graph"
errorFile = open("error.txt", "w")
errorFile.write(randomGraph.simpleToString() + '\n')
#Creates a random planar graph, which may not be connected
def createRandomGraph():
height = randint(settings[2], settings[3])
randGraph = []
for i in range(height):
rowLength = randint(settings[0], settings[1])
row = getRow(rowLength, i)
while len(row) == 0:
row = getRow(rowLength, i)
randGraph.extend(row)
if checkAlignment(randGraph) == False:
randGraph = createRandomGraph()
return randGraph
def checkAlignment(graph):
for face in graph:
if face.getX() == 0:
break
else:
#there is no face on the y-axis
return False
for face in graph:
if face.getY() == 0:
break
else:
#there is no face on the x-axis
return False
#there is a face on the x-axis
return True
def createRandomConnectedGraph():
g = createRandomGraph()
while isConnected(faceGraphToInts(g)) == False:
g = createRandomGraph()
return g
#generates a row for the createRandomGraph method
def getRow(rl, rowNum):
r = []
for j in range(rl):
chance = randint(0, 100)
if chance > settings[4] * 100:
r.append(Face(j, rowNum))
return r
def _createRandomKekulean():
#creates a face graphs
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
while isKekulean(randomGraph) == False:
#print "making K"
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
if isKekulean(randomGraph):
return randomGraph
else:
return _createRandomKekulean()
def createManyKekuleans():
graphs = [] #list of kekulean graphs
graphList = [] #list of the Kekulean graphs with their matchings, and Fries/Clars Faces
trials = int(raw_input("How many graphs would you like to create? "))
pool = mp.Pool(mp.cpu_count())
results = [pool.apply_async(_createRandomKekulean) for x in range(trials)]
graphs = [r.get() for r in results]
for g in graphs:
graphList.extend(assignMatching(g))
graphList.sort()
if len(graphList) > 0:
print "There are", len(graphList), "Kekulean structures"
displayGraphs(graphList)
def testKekuleanThms():
conflictFile = open("conflict.txt", "w")
interval = float(raw_input("How many hours would you like to run the program?"))
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
counter = 0
while t2 - t1 < timeLimit:
print "graph #" + str(counter)
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
nelsonThm = isOldKekulean(randomGraph)
perfectMatchingThm = isKekulean(randomGraph)
if nelsonThm != perfectMatchingThm:
conflictFile.write("Perfect matching: " + str(perfectMatchingThm) + " Nelson Thm: " + str(nelsonThm) + "\n")
conflictFile.write(randomGraph.simpleToString())
conflictFile.write("\n")
t2 = time.time()
counter += 1
conflictFile.close()
#takes a row and returns the number of vertical edges in that row
def getRowEdgeCount(row):
edgeCount = 0
f = 0
for i in range(len(row)):
edgeCount += 1
try:
f = row[i+1]
except:
f = None
if row[i] + 1 != f or f == None:
edgeCount += 1
return edgeCount
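# Worked example (illustrative): for the face-index row [0, 1, 2, 4], every face
# contributes one vertical edge on its left (4 total) and each contiguous run of
# faces ({0,1,2} and {4}) contributes one extra closing edge on its right, so
# getRowEdgeCount([0, 1, 2, 4]) returns 6.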
def getMinRows(g):
minRows = {}
index = 0
minEdges = sys.maxint
for r in g:
edgeCount = getRowEdgeCount(r)
if edgeCount < minEdges:
minEdges = edgeCount
minRows.clear()
minRows[index] = r
elif edgeCount == minEdges:
minRows[index] = r
index += 1
return minRows
#counts up the number of peaks above each row and stores those values in a list at indexes that correspond to the rows of the graph
def getPeaksAboveRows(g):
peaksAboveRow = [0]*(len(g))
for r in range(len(g)):
#print "r: " + str(r)
row = g[r]
if r > 0:
peaksAboveRow[r] += peaksAboveRow[r-1]
for col in range(len(row)):
face = row[col]
if searchRow(face, True, g, r) == True:
peaksAboveRow[r] += 1
#print "Peak at: " + str(r) + ", " + str(col)
if searchRow(face, False, g, r) == True and r < len(g)-1:
peaksAboveRow[r+1] -= 1
#print "Valley at: " + str(r) + ", " + str(col)
peaksAboveRow[r] = abs(peaksAboveRow[r])
return peaksAboveRow
#Theorem I developed
def NelsonThm(peaks, g):
kekulean = True
minRows = getMinRows(g)
for i, row in minRows.items():
if peaks[i] > getRowEdgeCount(row):
kekulean = False
break
return kekulean
#checks if a graph is Kekulean and returns a boolean
def isOldKekulean(graph):
fg = faceGraphToInts(graph.getFaceGraph())
peaksAbove = getPeaksAboveRows(fg)
#print peaksAbove
kekulean = NelsonThm(peaksAbove, fg)
return kekulean
def getUpperBounds(graph):
#faceGraph = getInput(filename)
#vertexGraph = makeVertexGraph(faceGraph)
#graph = Graph(faceGraph, vertexGraph)
kekulean = isKekulean(graph)
if kekulean == True:
rowCount = [0] * graph.getNumberOfRows()
whiteCount = [0] * graph.getNumberOfRows()
blackCount = [0] * graph.getNumberOfRows()
print "len:", len(whiteCount)
for v in graph.getVertexGraph():
#even y numbers mean the vertex is marked white on the graph
if v.getY() % 2 == 0:
index = v.getY() / 2
if index < len(whiteCount):
whiteCount[index] += 1
#The else implies that the vertex's y is odd, and thus the vertex is marked black
else:
index = (v.getY() - 1) / 2
if index < len(blackCount):
blackCount[index] += 1
print "Upper Bonds of the graph per row:"
for index in range(len(rowCount)):
count = abs(sum(whiteCount[0:index+1]) - sum(blackCount[0:index+1]))
print count
rowCount[index] = count
totalUpperBounds = sum(rowCount)
print "Upper bound of the graph:", totalUpperBounds
else:
print "The graph is not Kekulean"
def testConjectureSameFaces(root,interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
graphList = []
graphNumber = 0
counter = 0
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#must be 'fries' or 'clars'
Graph.comparison = 'clars'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
#h.setString(structures[0].simpleToString())
#is the data right?
#print "Verts:", h.getNumVertices()
#print "Structures:", h.getNumStructures()
#print "Clar:", h.getFriesNumber()
for g in graphList:
if(h.getFaces() == g.getFaces()):
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
#first part
if h.getClarsNumber() > g.getClarsNumber():
print 'Conjecture is false:'
drawConflictsCC(g, h)
#only adds graphs to the list if it is under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
graphNumber += 1
text.update_idletasks()
quit.update_idletasks()
scrollbar.update_idletasks()
text.destroy()
scrollbar.destroy()
quit.destroy()
#second part
def testConjectureSameFacesKKFF(root, interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
graphList = []
graphNumber = 0
counter = 0
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#must be 'fries' or 'clars'
Graph.comparison = 'fries'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
clarNumberStructure = []
friesNumberStructure = []
for g in graphList:
if(h.getFaces() == g.getFaces()):
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
if h.getFriesNumber() > g.getFriesNumber():
drawConflictsKKFF(g, h)
#only adds graphs to the list if it is under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
graphNumber += 1
text.update_idletasks()
quit.update_idletasks()
scrollbar.update_idletasks()
text.destroy()
scrollbar.destroy()
quit.destroy()
def testConjectureSameFacesFFCC(root, interval):
clarNumberStructures = []
friesNumberStructures = []
graphs = []
graphList = []
temp = 0
graphNumber = 0
counter = 0
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
randomGraph.setMaxClarManual(setMaxClar(randomGraph))
randomGraph.setMaxFriesManual(setMaxFries(randomGraph))
h = structures[-1]
graphs.append(randomGraph)
h.setMaxClarManual(setMaxClar(randomGraph))
h.setMaxFriesManual(setMaxFries(randomGraph))
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
graphCount = 0
graphNumber += 1
for g in graphList:
if(g.getFaces() == h.getFaces()):
if g.getNumVertices() == h.getNumVertices():
if g.getNumStructures() < h.getNumStructures():
if g.getMaxClar() > h.getMaxClar():
if g.getMaxFries() < h.getMaxFries():
print 'Conjecture is false:\n'
saveClarFaceFFCC(graphs[graphCount],randomGraph,temp)
saveFriesFaceFFCC(graphs[graphCount],randomGraph,temp)
folderName = "FFCCConjectureConflicts"
fileName = folderName + "/" + str(randomGraph.getNumVertices()) + "_" + str(temp)+ "/info" + ".txt"
f = open(fileName,'w')
f.write("C1: " + str(g.getMaxClar()) + " C2: " + str(h.getMaxClar()) + " F1: " + str(g.getMaxFries()) + " F2: " + str(h.getMaxFries()) + "\n")
f.write(str(faceGraphToInts(g.getFaceGraph())) + "\n")
f.write(str(faceGraphToInts(h.getFaceGraph())) + "\n")
f.close()
temp += 1
graphCount += 1
#only adds graphs to the list if it is under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
def setMaxFries(graph):
g = graph.getFaceGraph()
v = makeVertexGraph(g)
G = Graph(g,v)
structures = assignMatching(G)
Graph.comparison = 'fries'
structures.sort()
return structures[-1].getFriesNumber()
def setMaxClar(graph):
g = graph.getFaceGraph()
v = makeVertexGraph(g)
G = Graph(g,v)
structures = assignMatching(G)
Graph.comparison = 'clars'
structures.sort()
return structures[-1].getClarsNumber()
def saveClarFaceFFCC(graph1,graph2,count):
g1 = graph1.getFaceGraph()
g2 = graph2.getFaceGraph()
v1 = makeVertexGraph(g1)
v2 = makeVertexGraph(g2)
G1 = Graph(g1,v1)
G2 = Graph(g2,v2)
structures1 = assignMatching(G1)
structures2 = assignMatching(G2)
Graph.comparison = 'clars'
structures1.sort()
structures2.sort()
h1 = structures1[-1]
h2 = structures2[-1]
if not os.path.exists("FFCCConjectureConflicts"):
os.mkdir("FFCCConjectureConflicts")
folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)
#setup folder
if not os.path.exists(folderName):
os.mkdir(folderName)
#print "adding"
fileName1 = folderName + "/clar1" + ".png"
fileName2 = folderName + "/clar2" + ".png"
#print fileName1
saveSinglePNG(h1,fileName1)
saveSinglePNG(h2,fileName2)
def saveFriesFaceFFCC(graph1,graph2,count):
g1 = graph1.getFaceGraph()
g2 = graph2.getFaceGraph()
v1 = makeVertexGraph(g1)
v2 = makeVertexGraph(g2)
G1 = Graph(g1,v1)
G2 = Graph(g2,v2)
structures1 = assignMatching(G1)
structures2 = assignMatching(G2)
Graph.comparison = 'fries'
structures1.sort()
structures2.sort()
h1 = structures1[-1]
h2 = structures2[-1]
if not os.path.exists("FFCCConjectureConflicts"):
os.mkdir("FFCCConjectureConflicts")
folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)
#setup folder
if not os.path.exists(folderName):
os.mkdir(folderName)
#print "adding"
fileName1 = folderName + "/fries1" + ".png"
fileName2 = folderName + "/fries2" + ".png"
#print fileName1
saveSinglePNG(h1,fileName1)
saveSinglePNG(h2,fileName2)
def testConjectureDifferentFaces(hours=0):
graphList = []
results = open("results.txt", "w")
results.write("The program actually run!")
if hours == 0:
interval = float(raw_input("How many hours would you like to run the program? "))
else:
interval = hours
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
counter = 0
while t2 - t1 < timeLimit:
print "graph #" + str(counter)
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
for f in randomGraph.getFaceGraph():
pairs = randomGraph.getBondedVertices(f)
print str(pairs)
#must be 'fries' or 'clars'
Graph.comparison = 'clars'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
#h.setString(structures[0].simpleToString())
#is the data right?
#print "Verts:", h.getNumVertices()
#print "Structures:", h.getNumStructures()
#print "Clar:", h.getFriesNumber()
for g in graphList:
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
#first part
if h.getClarsNumber() > g.getClarsNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Clars: ' + str(h.getClarsNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Clars: ' + str(g.getClarsNumber()) + " Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsCC(g, h)
#second part
if h.getFriesNumber() > g.getFriesNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Fries: ' + str(h.getFriesNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Fries: ' + str(g.getFriesNumber()) + " Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsKKFF(g, h)
#third part
if h.getClarsNumber() > g.getClarsNumber():
if h.getFriesNumber() < g.getFriesNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Clars: ' + str(h.getClarsNumber()) + "graph H: Fries: " + str(h.getFriesNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Clars: ' + str(g.getClarsNumber()) + "graph G: Fries: " + str(g.getFriesNumber()) +" Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsFFCC(g, h)
#only adds graphs to the list if it is under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
def findHighestClars(graphs):
clars = 0
for g in graphs:
if g.getClarsNumber() > clars:
clars = g.getClarsNumber()
return clars
def _findRequiredEdges(graphs):
masterSet = getRequiredSet(graphs)
if len(masterSet) > 0:
for edge in masterSet:
v1, v2 = edge
v1.required = True
v2.required = True
return True
else:
return False
def findRequiredEdges(hours=0):
if not os.path.exists("requiredEdges"):
os.mkdir("requiredEdges")
edgeFile = open("requiredEdges/RequiredEdges.txt", "w")
graphNumber = 0
rqNum = 0
flag = False
if hours == 0:
interval = float(raw_input("How many hours would you like to run the program? "))
else:
interval = hours
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
print "graph", graphNumber
flag = False
graph = _createRandomKekulean()
graphs = assignMatching(graph)
for f in graph.getFaceGraph():
pairs = graph.getBondedVertices(f)
print str(pairs)
flag = _findRequiredEdges(graphs)
if flag == True:
print "Found graph with required edges"
edgeFile.write("Graph: " + str(rqNum) + "\n")
edgeFile.write(graph.simpleToString())
edgeFile.write("\n\n")
#save PNG's
fileName = "requiredEdges/Graph" + str(rqNum) + ".png"
saveSinglePNG(graphs[0], fileName)
rqNum += 1
graphNumber += 1
t2 = time.time()
def BreakModule():
global Break
Break = True
def BreakLoop():
global BreakLoop
BreakLoop = True
def combineGraphs(root,interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
graphNumber = 0
superGraphNumber = 0
deletedCount = 0
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT,fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command=text.yview)
storedGraphs = {}
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
text.insert(CURRENT,"graph: " + str(graphNumber) + "\n")
if Break == True:
Break = False
quit.destroy()
break
flag = False
#new stuff
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#end new stuff
Graph.comparison = 'clars'
structures.sort()
randomGraph.maxClars = structures[-1].getClarsNumber()
req_edges = getRequiredSet(structures)
externalEdges = getExternalEdges(req_edges)
if len(externalEdges) > 0:
#add graph and edges to list
storedGraphs[randomGraph] = externalEdges
for g, edges in storedGraphs.items():
complements = getComplements(externalEdges, edges)
for edge, compEdge in complements:
faceA = (edge[0].getFaces() & edge[1].getFaces()).pop()
faceB = (compEdge[0].getFaces() & compEdge[1].getFaces()).pop()
x = faceA.getX() - faceB.getX()
y = faceA.getY() - faceB.getY()
if edge[2] == "TOP_RIGHT" and compEdge[2] == "BOTTOM_LEFT":
newGraph = offsetFaces(g, x, y + 1);
elif edge[2] == "RIGHT" and compEdge[2] == "LEFT":
newGraph = offsetFaces(g, x + 1, y);
elif edge[2] == "TOP_LEFT" and compEdge[2] == "BOTTOM_RIGHT":
newGraph = offsetFaces(g, x + 1, y + 1);
elif edge[2] == "BOTTOM_LEFT" and compEdge[2] == "TOP_RIGHT":
newGraph = offsetFaces(g, x, y - 1);
elif edge[2] == "LEFT" and compEdge[2] == "RIGHT":
newGraph = offsetFaces(g, x - 1, y);
elif edge[2] == "BOTTOM_RIGHT" and compEdge[2] == "TOP_LEFT":
newGraph = offsetFaces(g, x - 1, y - 1);
overlap = checkFaceOverlap(randomGraph, newGraph)
#print overlap
if overlap is False:
faceGraph = combineFaces(randomGraph, newGraph)
faceGraph = adjustForNegatives(faceGraph)
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
#start new stuff
if len(structures) > 0:
#setup folder
folderName = "CombinedTemps"
if not os.path.exists(folderName):
os.mkdir(folderName)
fileName = folderName + "/superGraph.txt"
f = open(folderName + "/superGraph" + str(superGraphNumber) + ".txt" ,'w')
f.write(str(superGraph) + '\n')
f.close()
Graph.comparison = 'clars'
structures.sort()
if not os.path.exists("CombinedGraphs"):
os.mkdir("CombinedGraphs")
folderNameCG = "CombinedGraphs/superGraph" + str(superGraphNumber)
#setup folder
if not os.path.exists(folderNameCG):
os.mkdir(folderNameCG)
superName = folderNameCG + "/superGraph" + str(superGraphNumber) + ".png"
saveSinglePNG(structures[0], superName)
addCombinationsPNG(randomGraph, newGraph,superGraph, superGraphNumber, deletedCount)
superGraphNumber += 1
graphNumber += 1
t2 = time.time()
quit.update_idletasks()
quit.destroy()
def resetCombinedGraphs(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeCombinedGraphsSetup(root,appInfo,path = "CombinedTemps",extension = ".txt"):
runningApps = []
root.geometry("600x400")
graphNumber = IntVar()
entry = Entry(root, textvariable = graphNumber)
entry.pack()
runningApps.append(entry)
if not os.path.exists(path):
os.mkdir(path)
num_files = len([f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))])
num_files -= 1
#for i in range(0,num_files):
#oldFilename = path + "/superGraph" + str(k+1) + extension
#os.rename(oldFilename, path + "/superGraph" + str(i) + extension)
label = Label(root, text="There are " + str(num_files) + " files in the directory. Which would you like to look at?")
label.pack()
runningApps.append(label)
i = 0
submit = Button(root, text ="Submit", command = lambda: checkAnalyze(root,appInfo,num_files,quit,entry,label,i,graphNumber,submit,runningApps))
submit.pack(side = BOTTOM)
while i == 0:
i = graphNumber.get()
submit.update_idletasks()
entry.update_idletasks()
label.update_idletasks()
def checkAnalyze(root,appInfo,num_files,quit,entry,label,i,graphNumber,submit,runningApps):
submit.destroy()
again = Label(root, text="That file does not exist, please try again.")
submit = Button(root, text ="Submit", command = lambda: analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry))
submit.pack(side = BOTTOM)
if i < -1 or i > num_files:
again.pack()
else:
analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry)
while (i < -1 or i > num_files):
submit.update_idletasks()
entry.update_idletasks()
label.update_idletasks()
again.update_idletasks()
i = graphNumber.get()
def analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry):
submit.destroy()
again.destroy()
label.destroy()
entry.destroy()
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,i,choice,selection,choiceEntry,extension = ".txt",path = "CombinedTemps"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = "/superGraph" + str(i) + extension
faceGraph = getInput(path + "/superGraph" + str(i) + extension)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="Please enter 'fries' or 'clars'.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,i,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayCombinedClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distinct Kekule structures available. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayCombinedClarFries(structureNumber.get(),structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetCombinedGraphs(root,appInfo,submitGraph,graphNumberEntry,view)
def addCombinationsPNG(graph,newGraph,superGraph,superGraphNumber,deletedCount):
new1 = graph.getFaceGraph()
new2 = newGraph.getFaceGraph()
vertexG1 = makeVertexGraph(new1)
vertexG2 = makeVertexGraph(new2)
g1 = Graph(new1,vertexG1)
g2 = Graph(new2,vertexG2)
firstStructures = assignMatching(g1)
secondStructures = assignMatching(g2)
_findRequiredEdges(firstStructures)
_findRequiredEdges(secondStructures)
Graph.comparison = 'clars'
firstStructures.sort()
secondStructures.sort()
if(isKekulean(g2) == True and isKekulean(g1) == True):
folderNameCG = "CombinedGraphs/superGraph" + str(superGraphNumber)
firstName = folderNameCG + "/Graph" + str(1) + ".png"
secondName = folderNameCG + "/Graph" + str(2) + ".png"
saveSinglePNG(firstStructures[0], firstName)
saveSinglePNG(secondStructures[0], secondName)
else:
directoryName = "CombinedDeleted"
if not os.path.exists(directoryName):
os.mkdir(directoryName)
folderName = "CombinedDeleted/superGraph" + str(superGraphNumber) + "_" + str(deletedCount)
if not os.path.exists(folderName):
os.mkdir(folderName)
f = superGraph.getFaceGraph()
v3 = makeVertexGraph(f)
g3 = Graph(f,v3)
superGraphStructure = assignMatching(g3)
fileName = folderName + "/superDeleted" + str(superGraphNumber) + ".png"
firstName = folderName + "/Graph" + str(1) + ".png"
secondName = folderName + "/Graph" + str(2) + ".png"
saveSinglePNG(superGraphStructure[0], fileName)
saveSinglePNG(firstStructures[0], firstName)
saveSinglePNG(secondStructures[0], secondName)
shutil.rmtree("CombinedGraphs/superGraph" + str(superGraphNumber))
superGraphNumber -= 1
deletedCount += 1
def removeCombinedDuplicates(path = "CombinedTemps",extension = ".txt"):
num_files = len([f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))])
print num_files
num_files -= 7
print num_files
masterFaceGraph = []
for i in range(0,num_files):
filename = "/superGraph" + str(i) + extension
faceGraph = getInput(path + "/superGraph" + str(i) + extension)
masterFaceGraph.append(faceGraphToInts(faceGraph))
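#compare every pair of stored face graphs row by row; identical graphs are
#treated as duplicates, their files are deleted and the rest are renumbered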
for f in range(0, len(masterFaceGraph)):
for k in range(f+1, len(masterFaceGraph)):
flag = True
for h in range(0,len(masterFaceGraph[f])):
a = masterFaceGraph[f][h]
b = masterFaceGraph[k][h]
if len(a) != len(b):
flag = False
break
for t in range(0,len(masterFaceGraph[f][h])):
c = a[t]
d = b[t]
if c != d:
flag = False
break
if flag == False:
break
if (flag == True):
masterFaceGraph.remove(masterFaceGraph[k])
shutil.rmtree("CombinedGraphs/superGraph" + str(k))
os.remove("CombinedTemps/superGraph" + str(k) + extension)
for i in range(k+1,num_files):
path1 = "CombinedGraphs"
path2 = "CombinedTemps"
oldFilename1 = path1 + "/superGraph" + str(i)
oldFilename2 = path2 + "/superGraph" + str(i) + extension
os.rename(oldFilename1 + "/superGraph" + str(i) + ".png", oldFilename1 + "/superGraph" + str(i-1) + ".png")
os.rename(oldFilename1, path1 + "/superGraph" + str(i-1))
os.rename(oldFilename2, path2 + "/superGraph" + str(i-1) + extension)
num_files -= 1
|
Jc11235/Kekulean_Program
|
GUI_Version/Ubuntu_Version/DriverMethods.py
|
Python
|
gpl-2.0
| 39,406 | 0.044054 |
import logging
import gym
import numpy as np
from chaos_theory.algorithm import DDPG
from chaos_theory.run.run_algorithm import run_online_algorithm
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
np.random.seed(1)
if __name__ == "__main__":
env_name = 'HalfCheetah-v1'
env = gym.make(env_name)
algorithm = DDPG(env, track_tau=0.001, discount=0.95)
run_online_algorithm(env, algorithm, max_length=1000, samples_per_update=1, verbose_trial=5, log_name='ddpg_'+env_name)
|
justinjfu/chaos_theory
|
scripts/run_ddpg.py
|
Python
|
gpl-3.0
| 526 | 0.001901 |
"""nubrain URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from nubrain.settings import BASE_URL, APP_NAME
from django.views.generic import RedirectView
from django.utils.translation import ugettext_lazy
urlpatterns = patterns('',
(r'^$', RedirectView.as_view(url='%s/admin/' % BASE_URL)),
url(r'^admin/', include(admin.site.urls)),
)
admin.site.site_title = ugettext_lazy(APP_NAME)
admin.site.site_header = ugettext_lazy('%s Admin' % APP_NAME)
admin.site.index_title = ugettext_lazy('%s Dashboard' % APP_NAME)
admin.autodiscover()
|
NuChwezi/nubrain
|
nubrain/urls.py
|
Python
|
mit
| 1,184 | 0.000845 |
from unittest.case import TestCase
from dateutil.parser import parse
from responsebot.models import User, Tweet
class UserModelTestCase(TestCase):
def test_create_from_raw_data(self):
created_at = 'Mon Apr 25 08:25:58 +0000 2016'
raw = {
'some_key': 'some value',
'created_at': created_at,
'status': {
'created_at': created_at
},
'following': True
}
expected_created_at = parse(created_at)
user = User(raw)
self.assertEqual(user.some_key, 'some value')
self.assertEqual(user.created_at, expected_created_at)
self.assertTrue(isinstance(user.tweet, Tweet))
self.assertEqual(user.tweet.created_at, expected_created_at)
self.assertEqual(user.following, True)
|
invinst/ResponseBot
|
tests/unit_tests/models/test_user_model.py
|
Python
|
apache-2.0
| 824 | 0 |
def triangle_sum(tri, r, c, h, memo):
if (r, c, h) in memo:
return memo[(r, c, h)]
ans = tri[r][c]
if h > 0:
ans += triangle_sum(tri, r + 1, c, h - 1, memo)
ans += triangle_sum(tri, r + 1, c + 1, h - 1, memo)
if h > 1:
ans -= triangle_sum(tri, r + 2, c + 1, h - 2, memo)
memo[(r, c, h)] = ans
return ans
def min_triangle_sum(tri):
memo = {}
minimum = tri[0][0]
for r in range(len(tri)):
for c in range(r + 1):
print(r, c)
for h in range(len(tri) - r):
print(r, c, h)
s = triangle_sum(tri, r, c, h, memo)
if s < minimum:
minimum = s
print(r, c, h, ':', minimum)
return minimum
def min_triangle_sum_2(tri):
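# memo[(r, c)] holds the prefix sum of row r up to (but not including) column c,
# so each row segment of a growing sub-triangle can be summed in O(1)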
memo = {}
for r in range(len(tri)):
s = 0
memo[(r, 0)] = 0
for c in range(0, r + 1):
s += tri[r][c]
memo[(r, c+1)] = s
minimum = tri[0][0]
for r in range(len(tri)):
for c in range(r + 1):
minimum_2 = 0
for h in range(len(tri) - r):
minimum_2 += memo[(r + h, c + h + 1)] - memo[(r + h, c)]
if minimum_2 < minimum:
minimum = minimum_2
print(r, c, h, ':', minimum)
return minimum
def make_triangle(n=1000):
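# builds the pseudo-random triangle from the linear congruential generator in
# Project Euler problem 150: each entry is t - 2**19 with t <- (615949*t + 797807) mod 2**20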
triangle = [[0] * k for k in range(1, n + 1)]
r = 0
c = 0
t = 0
for k in range(n * (n + 1) // 2):
t = (615949 * t + 797807) % 2**20
triangle[r][c] = t - 2**19
c += 1
if c == len(triangle[r]):
r += 1
c = 0
return triangle
triangle = [
[ 15],
[-14, - 7],
[ 20, -13, - 5],
[- 3, 8, 23, -26],
[ 1, - 4, - 5, -18, 5],
[-16, 31, 2, 9, 28, 3],
]
triangle = make_triangle()
print(min_triangle_sum_2(triangle))
|
simonolander/euler
|
euler-150-searching-a-triangular-array-for-a-sub-triangle-having-minimum-sum.py
|
Python
|
mit
| 1,906 | 0.001574 |
#########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from flask_security.utils import hash_password
from cloudify.cluster_status import (
DB_STATUS_REPORTER,
BROKER_STATUS_REPORTER,
MANAGER_STATUS_REPORTER,
MANAGER_STATUS_REPORTER_ID,
BROKER_STATUS_REPORTER_ID,
DB_STATUS_REPORTER_ID
)
from manager_rest.storage.models import Tenant, UserTenantAssoc
from manager_rest.storage import user_datastore
from manager_rest.constants import (
DEFAULT_TENANT_ID,
DEFAULT_TENANT_ROLE,
)
ADMIN_ROLE = 'sys_admin'
USER_ROLE = 'default'
USER_IN_TENANT_ROLE = 'user'
def get_admin_user():
return {
'username': 'admin',
'password': 'admin',
'role': ADMIN_ROLE
}
def get_status_reporters():
return [
{
'username': MANAGER_STATUS_REPORTER,
'password': 'password',
'role': MANAGER_STATUS_REPORTER,
'id': MANAGER_STATUS_REPORTER_ID
},
{
'username': BROKER_STATUS_REPORTER,
'password': 'password',
'role': BROKER_STATUS_REPORTER,
'id': BROKER_STATUS_REPORTER_ID
},
{
'username': DB_STATUS_REPORTER,
'password': 'password',
'role': DB_STATUS_REPORTER,
'id': DB_STATUS_REPORTER_ID
},
]
def get_test_users():
test_users = [
{
'username': 'alice',
'password': 'alice_password',
'role': ADMIN_ROLE
},
{
'username': 'bob',
'password': 'bob_password',
'role': USER_ROLE
},
{
'username': 'clair',
'password': 'clair_password',
'role': USER_ROLE,
'active': False
},
{
'username': 'dave',
'password': 'dave_password',
'role': USER_ROLE
}
]
return test_users
def add_users_to_db(user_list):
default_tenant = Tenant.query.get(DEFAULT_TENANT_ID)
for user in user_list:
role = user_datastore.find_role(user['role'])
user_obj = user_datastore.create_user(
username=user['username'],
password=hash_password(user['password']),
roles=[role]
)
default_tenant_role = user_datastore.find_role(DEFAULT_TENANT_ROLE)
user_obj.active = user.get('active', True)
user_tenant_association = UserTenantAssoc(
user=user_obj,
tenant=default_tenant,
role=default_tenant_role,
)
user_obj.tenant_associations.append(user_tenant_association)
user_datastore.commit()
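# Illustrative sketch (assumes an initialized manager_rest Flask app context and
# database, as the surrounding test framework provides); the names below come
# from the helpers defined above:
#
# add_users_to_db([get_admin_user()] + get_status_reporters() + get_test_users())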
|
cloudify-cosmo/cloudify-manager
|
rest-service/manager_rest/test/security_utils.py
|
Python
|
apache-2.0
| 3,245 | 0 |
#!/usr/bin/env python
# encoding: utf-8
"""
Staircase.py
Created by Tomas HJ Knapen on 2009-11-26.
Copyright (c) 2009 TK. All rights reserved.
"""
import os, sys, datetime
import subprocess, logging
import pickle, datetime, time
import scipy as sp
import numpy as np
# import matplotlib.pylab as pl
from math import *
class OneUpOneDownStaircase(object):
"""
OneUpOneDownStaircase object, for one-up-one-down staircase in its standard form.
"""
def __init__(self, initial_value, initial_stepsize, nr_reversals = 10, increment_value = None, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 40 ):
self.initial_value = initial_value
self.initial_stepsize = initial_stepsize
self.nr_reversals = nr_reversals
self.increment_value = increment_value
self.stepsize_multiplication_on_reversal = stepsize_multiplication_on_reversal
self.max_nr_trials = max_nr_trials
self.test_value = self.initial_value
self.present_increment_value = increment_value
# set up filler variables
self.past_answers = []
self.nr_trials = 0
self.present_nr_reversals = 0
def test_value(self):
return self.test_value
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
if correct: # answer was correct and so we lower the contrast/whatever value
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + self.present_increment_value
self.past_answers.append(correct)
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
class TwoUpOneDownStaircase(OneUpOneDownStaircase):
def __init__(self, initial_value, initial_stepsize, nr_reversals = 10, increment_value = None, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 40 ):
super(TwoUpOneDownStaircase, self).__init__(initial_value, initial_stepsize, nr_reversals = nr_reversals, increment_value = increment_value, stepsize_multiplication_on_reversal = stepsize_multiplication_on_reversal, max_nr_trials = max_nr_trials)
self.past_answers = [0.5, 0.5, 0.5]
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
self.past_answers.append(correct)
nr_corrects_in_last_2_trials = np.array(self.past_answers, dtype = float)[-2:].sum()
if nr_corrects_in_last_2_trials == 2: # this subject is too good for this stimulus value
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + self.present_increment_value
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
class ThreeUpOneDownStaircase(TwoUpOneDownStaircase):
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
self.past_answers.append(correct)
nr_corrects_in_last_3_trials = np.array(self.past_answers, dtype = float)[-3:].sum()
if nr_corrects_in_last_3_trials == 3: # this subject is too good for this stimulus value
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + self.present_increment_value
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
class YesNoStaircase(object):
def __init__(self, initial_value, initial_stepsize, nr_reversals = 100, stepsize_multiplication_on_reversal = 0.75, max_nr_trials = 400 ):
self.initial_value = initial_value
self.initial_stepsize = initial_stepsize
self.nr_reversals = nr_reversals
self.stepsize_multiplication_on_reversal = stepsize_multiplication_on_reversal
self.max_nr_trials = max_nr_trials
self.test_value = self.initial_value
self.present_increment_value = initial_stepsize
# set up filler variables
self.past_answers = []
self.nr_trials = 0
self.present_nr_reversals = 0
def test_value(self):
return self.test_value
def answer( self, correct ):
continue_after_this_trial = True
self.nr_trials = self.nr_trials + 1
if correct: # answer was correct and so we lower the contrast/whatever value according to Kaernbach's method
self.test_value = self.test_value - self.present_increment_value
else:
self.test_value = self.test_value + 3.0 * self.present_increment_value
self.past_answers.append(correct)
if self.nr_trials > 1:
if self.past_answers[-1] != self.past_answers[-2]: # we have a reversal here
self.present_nr_reversals = self.present_nr_reversals + 1
if self.present_nr_reversals % 2 == 0:
self.present_increment_value = self.present_increment_value * self.stepsize_multiplication_on_reversal
if self.present_nr_reversals >= self.nr_reversals:
continue_after_this_trial = False
else:
pass
if self.nr_trials >= self.max_nr_trials:
continue_after_this_trial = False
return continue_after_this_trial
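# A minimal usage sketch (`_demo_one_up_one_down` is a hypothetical helper, not
# part of the original module): drive a staircase with simulated answers until
# it reports that the run is finished. The toy observer rule below is an
# assumption for illustration only.
def _demo_one_up_one_down(seed=0):
    rng = np.random.RandomState(seed)
    staircase = OneUpOneDownStaircase(initial_value=0.5, initial_stepsize=0.1, increment_value=0.1)
    keep_going = True
    while keep_going:
        correct = rng.rand() < staircase.test_value  # higher test value -> easier, assumed
        keep_going = staircase.answer(correct)
    return staircase.test_value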
|
VU-Cog-Sci/PRF_experiment
|
exp_tools/Staircase.py
|
Python
|
mit
| 6,218 | 0.040849 |
from .exceptions import *
from .raster import BrotherQLRaster
from .brother_ql_create import create_label
|
pklaus/brother_ql
|
brother_ql/__init__.py
|
Python
|
gpl-3.0
| 110 | 0.009091 |
from ..writer.crawler import CrawlerWriter
from ..writer.run import RunConfigWriter
from ..writer.sentry import SentryConfigWriter
from ..writer.route import RouteConfigWriter
from ..writer.monitor import MonitorConfigWriter
class WriterFactory:
CRAWLER = 0
RUN_CONFIG = 1
SENTRY_CONFIG = 2
ROUTE_CONFIG = 3
MONITOR_CONFIG = 4
def __init__(self):
pass
@classmethod
def get_writer(self, writer_name=None):
""" Exceptions:
- AssertionError
"""
assert writer_name is not None, "writer_name is not defined."
if writer_name == WriterFactory.CRAWLER:
return CrawlerWriter()
elif writer_name == WriterFactory.RUN_CONFIG:
return RunConfigWriter()
elif writer_name == WriterFactory.SENTRY_CONFIG:
return SentryConfigWriter()
elif writer_name == WriterFactory.ROUTE_CONFIG:
return RouteConfigWriter()
elif writer_name == WriterFactory.MONITOR_CONFIG:
return MonitorConfigWriter()
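# Illustrative usage sketch: the factory maps its class-level constants to the
# writer implementations imported above, e.g.
#
# crawler_writer = WriterFactory.get_writer(WriterFactory.CRAWLER)
# monitor_writer = WriterFactory.get_writer(WriterFactory.MONITOR_CONFIG)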
|
franziz/arcrawler
|
lib/factory/writer.py
|
Python
|
gpl-3.0
| 940 | 0.03617 |
import pygame
import random
import item
import mob
import tile
class Mapgen(object):
def __init__(self, level):
self.xsiz = 10
self.ysiz = 10
self.biome = "random"
self.procedure = 0
self.zone = []
self.level = level
self.sizefactor = 2
#self.items = pygame.sprite.Group()
#self.mobs = pygame.sprite.Group()
#creates the base map
def generate(self,x,y,biome):
self.zone = []
self.xsiz = x
self.ysiz = y
self.biome = biome
self.sizefactor = (x/10)+(y/10)
landtype = 0
#for num in range(sizefactor*3):
# itemo = item.Item(self.level, self.level.items)
# itemo.set_type(random.randrange(6)+1)
#for umb in range(sizefactor*3):
# mobbo = mob.Mob(self.level, self.level.mobs)
# mobbo.set_type(random.randrange(7))
# mobbo.set_species(random.randrange(4)+1)
#main land generation
for a in range(x):
mapcol = []
for b in range(y):
#Purely Random
if (self.procedure == 0):
landtype = random.randrange(17)+1
#probability manipulation
if (self.procedure == 1):
if (biome == "grassland"):
common = [1,2,3,13]
uncommon = [4,5,6,7]
rare = [8,9,10]
vrare = [12,15]
self.level.passable = 1
if(biome == "forest"):
common = [3,4,5,9]
uncommon = [1,2,6]
rare = [7,13]
vrare = [10,11,12]
self.level.passable = 2
if(biome == "desert"):
common = [8,7]
uncommon = [16,17]
rare = [9,13]
vrare = [1,2]
self.level.passable = 7
landex = random.randrange(256)
if landex < 256:
landtype = random.choice(common)
if landex < 64:
landtype = random.choice(uncommon)
if landex < 16:
landtype = random.choice(rare)
if landex < 2:
landtype = random.choice(vrare)
#generate the tiles
acre = tile.Land(self.level, self.level.terrain)
if a == 0 or b == 0 or a == x-1 or b == y-1:
acre.set_type(0)
self.level.space.add(acre)
for mobbo in self.level.mobs:
mobbo.unpassable.add(acre)
else:
acre.set_type(landtype)
acre.get_image()
acre.spawn(a, b)
self.level.background.add(acre)
mapcol.append(acre)
self.zone.append( mapcol )
for a in range(len(self.zone)):
for b in range(len(self.zone[0])):
place = self.zone[a][b]
if place in self.level.space:
pass
else:
for wa in range(3):
for ha in range(3):
if a+wa-1 >= len(self.zone) or b+ha-1 >= len(self.zone[0]):
pass
else:
place.neighbors.add(self.zone[a+wa-1][b+ha-1])
return self.zone
#causes deserts to expand
def desertify(self):
for place in self.level.terrain:
place.desert_check()
#causes forests to grow
def grow_forest(self):
for place in self.level.terrain:
place.forest_check()
#lowers sea level
def sea_lower(self):
for place in self.level.terrain:
if place.flavnum == 15:
if random.randrange(100) < 80:
place.set_type(14)
if place.flavnum == 14:
if random.randrange(100) < 70:
place.set_type(13)
if place.flavnum == 13:
if random.randrange(100) < 60:
place.set_type(1)
#raises sea level
def sea_fill(self):
for place in self.level.terrain:
excepts = [0,15,14,12,11,10]
if place.flavnum == 15:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(14)
if place.flavnum == 14:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
if place.flavnum == 13:
for location in place.neighbors:
if random.randrange(100) < 10:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
#populates the map with mobs
def populate(self, density):
for a in range(self.sizefactor*density):
mobbo = mob.Mob(self.level, self.level.mobs)
mobbo.set_type(random.randrange(7))
mobbo.set_species(random.randrange(4)+1)
mobbo.unpassable.add(self.level.space)
mobbo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if mobbo.mapx == self.level.player1.mapx and mobbo.mapy == self.level.player1.mapy:
mobbo.kill()
#adds items to the map
def litter(self, density):
for a in range(self.sizefactor*density):
itemo = item.Item(self.level, self.level.items)
itemo.set_type(random.randrange(8))
itemo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if itemo.mapx == self.level.player1.mapx and itemo.mapy == self.level.player1.mapy:
itemo.kill()
#adds landmarks
def monumentalize(self, number):
for a in range(number):
monument = tile.Landmark(self.level, self.level.background)
monument.set_type(random.randrange(4))
monument.spawn(random.randrange(len(self.zone)-3)+1,random.randrange(len(self.zone[0])-3)+1)
pygame.sprite.spritecollide(monument, self.level.landmarks, True)
self.level.landmarks.add(monument)
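# Illustrative sketch (heavily hedged: `level` is assumed to provide the sprite
# groups and player attributes referenced throughout this class):
#
# generator = Mapgen(level)
# generator.generate(20, 20, "forest")
# generator.populate(density=2)
# generator.litter(density=1)
# generator.monumentalize(3)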
|
Lincoln-Cybernetics/Explore-
|
mapgen.py
|
Python
|
unlicense
| 7,161 | 0.012847 |
# encoding=utf-8
# pykarta/geometry/from_text.py
# Copyright 2013--2020, Trinity College
# Last modified: 9 February 2020
import re
from . import Point
# Create a Point() from a text string describing a latitude and longitude
#
# Example from the Wikipedia article on the White House: 38° 53′ 51.61″ N, 77° 2′ 11.58″ W
# \u2032 -- prime (minutes sign)
# \u2033 -- double prime (seconds sign)
# \u2019 -- single closing quote
# \u201d -- double closing quote
def PointFromText(coords_text):
if not re.search(u'^[\(\-0-9\.°\'\u2019\u2032"\u201d\u2033NSEW, \)]+$', coords_text, flags=re.IGNORECASE):
return None
#print "Pasted coordinates:", coords_text
# Make the format more standard
coords_text = coords_text.upper() # nsew -> NSEW
coords_text = coords_text.replace(u"(", u"") # remove parenthesis
coords_text = coords_text.replace(u")", u"")
coords_text = coords_text.replace(u"'", u"\u2032") # ASCII single quote (apostroph) to prime
coords_text = coords_text.replace(u"\u2019", u"\u2032") # right single quote to prime
coords_text = coords_text.replace(u'"', u'\u2033') # ASCII double quote to double prime
coords_text = coords_text.replace(u'\u201d', u'\u2033') # right double quote to double prime
words = _split_coords_text(coords_text)
lat = _parse_degrees(words[0], "NS")
lon = _parse_degrees(words[1], "EW")
return Point(lat, lon)
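# Example sketch: the White House coordinates quoted in the comment above parse
# to roughly (38.8977, -77.0366). `_example_point` is a hypothetical helper.
def _example_point():
    return PointFromText(u"38° 53′ 51.61″ N, 77° 2′ 11.58″ W")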
def _split_coords_text(coords_text):
m = re.match('^([^,]+),([^,]+)$', coords_text)
if m:
return (m.group(1), m.group(2))
m = re.match('^(\S+)\s+(\S+)$', coords_text)
if m:
return (m.group(1), m.group(2))
m = re.match('^([NS].+)([EW].+)$', coords_text)
if m:
return (m.group(1), m.group(2))
m = re.match('^(.+[NS])(.+[EW])$', coords_text)
if m:
return (m.group(1), m.group(2))
raise Exception("Two coordinates required")
def _parse_degrees(degrees_string, directions):
degrees_string = degrees_string.replace(u" ", u"") # remove spaces
sign = 1.0
if directions[0] in degrees_string: # N or E
degrees_string = degrees_string.replace(directions[0], "")
elif directions[1] in degrees_string: # S or W
degrees_string = degrees_string.replace(directions[1], "")
sign = -1.0
# Decimal degrees signed
m = re.search(u'^([-\d\.]+)°?$', degrees_string)
if m:
return float(m.group(1)) * sign
# Degrees, minutes, seconds
m = re.search(u'^(\d+)°(\d+)\u2032([\d\.]+)\u2033$', degrees_string)
if m:
degrees = int(m.group(1))
degrees += int(m.group(2)) / 60.0
degrees += float(m.group(3)) / 3600.0
return degrees * sign
m = re.search(u'^(\d+)°([\d\.]+)\u2032?$', degrees_string)
if m:
degrees = int(m.group(1))
degrees += float(m.group(2)) / 60.0
return degrees * sign
raise Exception("Failed to parse coordinate: %s" % degrees_string)
|
david672orford/pykarta
|
pykarta/geometry/from_text.py
|
Python
|
gpl-2.0
| 2,761 | 0.030215 |
# -*- coding: utf-8 -*-
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('announcements', '0008_auto_20150603_1401')]
operations = [migrations.AddField(model_name='announcement',
name='expiration_date',
field=models.DateTimeField(default=datetime.datetime(3000, 1, 1, 0, 0)),)]
|
jacobajit/ion
|
intranet/apps/announcements/migrations/0009_announcement_expiration_date.py
|
Python
|
gpl-2.0
| 432 | 0.002315 |
# Copyright (C) 2014 Linaro Limited
#
# Author: Neil Williams <neil.williams@linaro.org>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
import atexit
import errno
import shutil
import tempfile
import datetime
import time
import pytz
import traceback
import os
from lava_dispatcher.action import (
LAVABug,
LAVAError,
JobError,
)
from lava_dispatcher.logical import PipelineContext
from lava_dispatcher.diagnostics import DiagnoseNetwork
from lava_dispatcher.protocols.multinode import MultinodeProtocol # pylint: disable=unused-import
from lava_dispatcher.utils.constants import DISPATCHER_DOWNLOAD_DIR
from lava_dispatcher.utils.filesystem import debian_package_version
class ZMQConfig(object):
"""
Namespace for the ZMQ logging configuration
"""
def __init__(self, logging_url, master_cert, slave_cert, ipv6):
self.logging_url = logging_url
self.master_cert = master_cert
self.slave_cert = slave_cert
self.ipv6 = ipv6
class Job(object): # pylint: disable=too-many-instance-attributes
"""
Populated by the parser, the Job contains all of the
Actions and their pipelines.
parameters provides the immutable data about this job:
action_timeout
job_name
priority
device_type (mapped to target by scheduler)
yaml_line
logging_level
job_timeout
Job also provides the primary access to the Device.
The NewDevice class only loads the specific configuration of the
device for this job - one job, one device.
"""
def __init__(self, job_id, parameters, logger): # pylint: disable=too-many-arguments
self.job_id = job_id
self.logger = logger
self.device = None
self.parameters = parameters
self.__context__ = PipelineContext()
self.pipeline = None
self.connection = None
self.triggers = []  # actions can add trigger strings to run a diagnostic
self.diagnostics = [
DiagnoseNetwork,
]
self.timeout = None
self.protocols = []
self.compatibility = 2
# Was the job cleaned
self.cleaned = False
# Root directory for the job tempfiles
self.tmp_dir = None
# override in use
self.base_overrides = {}
self.started = False
@property
def context(self):
return self.__context__.pipeline_data
@context.setter
def context(self, data):
self.__context__.pipeline_data.update(data)
def diagnose(self, trigger):
"""
Looks up the class to execute to diagnose the problem described by the
specified trigger.
"""
trigger_tuples = [(cls.trigger(), cls) for cls in self.diagnostics]
for diagnostic in trigger_tuples:
if trigger is diagnostic[0]:
return diagnostic[1]()
return None
def describe(self):
return {'device': self.device,
'job': self.parameters,
'compatibility': self.compatibility,
'pipeline': self.pipeline.describe()}
def mkdtemp(self, action_name, override=None):
"""
Create a tmp directory in DISPATCHER_DOWNLOAD_DIR/{job_id}/ because
this directory will be removed when the job finishes, making cleanup
easier.
"""
if override is None:
if self.tmp_dir is None:
create_base_dir = True
base_dir = DISPATCHER_DOWNLOAD_DIR
else:
create_base_dir = False
base_dir = self.tmp_dir
else:
if override in self.base_overrides:
create_base_dir = False
base_dir = self.base_overrides[override]
else:
create_base_dir = True
base_dir = override
if create_base_dir:
# Try to create the directory.
base_dir = os.path.join(base_dir, str(self.job_id))
try:
os.makedirs(base_dir, mode=0o755)
except OSError as exc:
if exc.errno != errno.EEXIST:
# When running unit tests
base_dir = tempfile.mkdtemp(prefix='pipeline-')
atexit.register(shutil.rmtree, base_dir, ignore_errors=True)
# Save the path for the next calls (only if that's not an override)
if override is None:
self.tmp_dir = base_dir
else:
self.base_overrides[override] = base_dir
# Create the sub-directory
tmp_dir = tempfile.mkdtemp(prefix=action_name + '-', dir=base_dir)
os.chmod(tmp_dir, 0o755)
return tmp_dir
def _validate(self):
"""
Validate the pipeline and raise an exception (that inherit from
LAVAError) if it fails.
"""
self.logger.info("Start time: %s (UTC)", pytz.utc.localize(datetime.datetime.utcnow()))
for protocol in self.protocols:
try:
protocol.configure(self.device, self)
except LAVAError:
self.logger.error("Configuration failed for protocol %s", protocol.name)
raise
except Exception as exc:
self.logger.error("Configuration failed for protocol %s", protocol.name)
self.logger.exception(traceback.format_exc())
raise LAVABug(exc)
if not protocol.valid:
msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
self.logger.exception(msg)
raise JobError(msg)
# Check that namespaces are used in all actions or none
namespaces = set()
for action in self.parameters["actions"]:
action_name = list(action.keys())[0]
namespaces.add(action[action_name]["namespace"])
# 'common' is a reserved namespace that should not be present with
# other namespaces.
if len(namespaces) > 1 and 'common' in namespaces:
msg = "'common' is a reserved namespace that should not be present with other namespaces"
self.logger.error(msg)
self.logger.debug("Namespaces: %s", ", ".join(namespaces))
raise JobError(msg)
# validate the pipeline
self.pipeline.validate_actions()
def validate(self):
"""
Public wrapper for the pipeline validation.
Send a "fail" results if needed.
"""
label = "lava-dispatcher, installed at version: %s" % debian_package_version(split=False)
self.logger.info(label)
self.logger.info("start: 0 validate")
start = time.time()
success = False
try:
self._validate()
except LAVAError as exc:
raise
except Exception as exc:
# provide useful info on command line, e.g. failed unit tests.
self.logger.exception(traceback.format_exc())
raise LAVABug(exc)
else:
success = True
finally:
if not success:
self.cleanup(connection=None)
self.logger.info("validate duration: %.02f", time.time() - start)
self.logger.results({"definition": "lava",
"case": "validate",
"result": "pass" if success else "fail"})
def _run(self):
"""
Run the pipeline under the run() wrapper that will catch the exceptions
"""
self.started = True
# Setup the protocols
for protocol in self.protocols:
try:
protocol.set_up()
except LAVAError:
raise
except Exception as exc:
self.logger.error("Unable to setup the protocols")
self.logger.exception(traceback.format_exc())
raise LAVABug(exc)
if not protocol.valid:
msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
self.logger.exception(msg)
raise JobError(msg)
# Run the pipeline and wait for exceptions
with self.timeout() as max_end_time:
self.pipeline.run_actions(self.connection, max_end_time)
def run(self):
"""
Top level routine for the entire life of the Job, using the job level timeout.
Python only supports one alarm on SIGALRM - any Action without a connection
will have a default timeout which will use SIGALRM. So the overarching Job timeout
can only stop processing actions if the job wide timeout is exceeded.
"""
try:
self._run()
finally:
# Cleanup now
self.cleanup(self.connection)
def cleanup(self, connection):
if self.cleaned:
self.logger.info("Cleanup already called, skipping")
return
# exit out of the pipeline & run the Finalize action to close the
# connection and poweroff the device (the cleanup action will do that
# for us)
self.logger.info("Cleaning after the job")
self.pipeline.cleanup(connection)
for tmp_dir in self.base_overrides.values():
self.logger.info("Override tmp directory removed at %s", tmp_dir)
try:
shutil.rmtree(tmp_dir)
except OSError as exc:
if exc.errno != errno.ENOENT:
self.logger.error("Unable to remove the directory: %s",
exc.strerror)
if self.tmp_dir is not None:
self.logger.info("Root tmp directory removed at %s", self.tmp_dir)
try:
shutil.rmtree(self.tmp_dir)
except OSError as exc:
if exc.errno != errno.ENOENT:
self.logger.error("Unable to remove the directory: %s",
exc.strerror)
# Mark cleanup as done to avoid calling it many times
self.cleaned = True
|
Linaro/lava-dispatcher
|
lava_dispatcher/job.py
|
Python
|
gpl-2.0
| 10,777 | 0.001299 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorFullMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorFullMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorFullMatrix` has exactly the same performance as would be
achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
made based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorFullMatrix"):
r"""Initialize a `LinearOperatorFullMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
TypeError: If `diag.dtype` is not an allowed type.
"""
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
self._check_matrix(self._matrix)
super(LinearOperatorFullMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_matrix(self, matrix):
"""Static check of the `matrix` argument."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
raise ValueError(
"Argument matrix must have at least 2 dimensions. Found: %s"
% matrix)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return math_ops.matmul(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _to_dense(self):
return self._matrix
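# A hedged usage sketch (graph-mode TensorFlow, matching the rest of this file):
# wrapping a self-adjoint, positive-definite matrix lets `solve` use the
# Cholesky-based path described in the class docstring. `_example_solve` is a
# hypothetical helper, not part of the public API.
def _example_solve():
  matrix = [[4., 1.], [1., 3.]]
  operator = LinearOperatorFullMatrix(
      matrix, is_self_adjoint=True, is_positive_definite=True)
  rhs = [[1.], [2.]]
  return operator.solve(rhs)  # Tensor of shape [2, 1]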
|
av8ramit/tensorflow
|
tensorflow/python/ops/linalg/linear_operator_full_matrix.py
|
Python
|
apache-2.0
| 6,505 | 0.001845 |
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from api import command, message, plugin
def onInit(plugin_in):
gitsrc_command = command.Command(plugin_in, 'source', shortdesc='Get the git repo for the bot!')
docs_command = command.Command(plugin_in, 'docs', shortdesc='Get a link to the bot\'s documentation')
tests_command = command.Command(plugin_in, 'tests', shortdesc='Get a link to the bot\'s tests')
return plugin.Plugin(plugin_in, 'tinyurl', [gitsrc_command, docs_command, tests_command])
async def onCommand(message_in):
if message_in.command == 'source':
return message.Message(body="https://github.com/StarbotDiscord/Starbot")
if message_in.command == 'docs':
return message.Message(body="http://starbot.readthedocs.io/en/latest/")
if message_in.command == 'tests':
return message.Message(body="https://travis-ci.org/StarbotDiscord/Starbot")
|
dhinakg/BitSTAR
|
plugins/srcutils.py
|
Python
|
apache-2.0
| 1,479 | 0.008114 |
# ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pychron.pipeline.editors.audit_editor import AuditEditor
from pychron.pipeline.nodes.base import BaseNode
class AuditNode(BaseNode):
auto_configure = False
name = 'Audit'
configurable = False
def run(self, state):
editor = AuditEditor()
editor.set_unks_refs(state.unknowns, state.references)
state.editors.append(editor)
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/pipeline/nodes/audit.py
|
Python
|
apache-2.0
| 1,164 | 0 |
import vtk
from vtk.util.misc import vtkGetDataRoot
import render_mpr
reader = vtk.vtkMetaImageReader()
reader.SetFileName("C:\\Users\\fei.wang\\PycharmProjects\\Rendering\\data\\org.mha")
reader.Update()
render_mpr = render_mpr.RendererMPR()
render_mpr.set_volume(reader)
render_mpr.set_output_image_size(1024, 1024)
render_mpr.set_lut_property("")
render_mpr.render()
render_mpr.get_output_png_image("1.png")
# test Camera
cur_camera = render_mpr.get_camera()
print("cur_camera:" + str(cur_camera))
render_mpr.set_camera(cur_camera)
render_mpr.render()
|
comedate/VolumeRendering
|
test_mpr.py
|
Python
|
mit
| 580 | 0.003448 |
from pprint import pprint
VAR = 42
def foo():
import sys
import ast, tokenize
pass
class C:
from textwrap import dedent
pass
import codecs as C
pass
|
asedunov/intellij-community
|
python/testData/formatter/noBlankLinesAfterLocalImports_after.py
|
Python
|
apache-2.0
| 182 | 0.005495 |
import string
__version__ = string.split('$Revision: 1.6 $')[1]
__date__ = string.join(string.split('$Date: 2001/11/17 14:12:34 $')[1:3], ' ')
__author__ = 'Tarn Weisner Burton <twburton@users.sourceforge.net>'
__doc__ = 'http://oss.sgi.com/projects/ogl-sample/registry/SUN/convolution_border_modes.txt'
__api_version__ = 0x103
GL_WRAP_BORDER_SUN = 0x81D4
def glInitConvolutionBorderModesSUN():
from OpenGL.GL import __has_extension
return __has_extension("GL_SUN_convolution_border_modes")
def __info():
if glInitConvolutionBorderModesSUN():
return []
|
fxia22/ASM_xf
|
PythonD/site_python/OpenGL/GL/SUN/convolution_border_modes.py
|
Python
|
gpl-2.0
| 585 | 0.011966 |
from dal import autocomplete
from django.conf.urls import url
from django.views import generic
from .forms import TestForm
from .models import TestModel
urlpatterns = [
url(
'test-autocomplete/$',
autocomplete.Select2QuerySetView.as_view(
model=TestModel,
create_field='name',
),
name='select2_one_to_one_autocomplete',
),
url(
'test/(?P<pk>\d+)/$',
generic.UpdateView.as_view(
model=TestModel,
form_class=TestForm,
)
),
]
|
luzfcb/django-autocomplete-light
|
test_project/select2_one_to_one/urls.py
|
Python
|
mit
| 547 | 0.001828 |
from urllib.request import urlopen
from urllib.parse import urlparse, parse_qs
from socket import error as SocketError
import errno
from bs4 import BeautifulSoup
MAX_PAGES_TO_SEARCH = 3
def parse_news(item):
'''Parse a news item
returns a tuple (id, title, url)
'''
url = 'http://www.spa.gov.sa' + item['href']
url_parsed = urlparse(url)
qs = parse_qs(url_parsed[4])
id = qs['newsid'][0]
title = item.h2.contents[0]
title = " ".join(title.split())
item_parsed = (id, title, url)
return item_parsed
def retrieve_news(person=0, royal=0, cabinet=0, last_id=-1):
'''Retrieve news for person or royal
person 1= king, 2= crown prince and 3= deputy crown prince
if royal is 1, royal news will be retrieved
if last_id is not defined it will return the max
return list of news tuples up to MAX_PAGES_TO_SEARCH (page = 10 news)
[(id, title, url)...]
'''
all_news = []
found = False
page = 1
while (page <= MAX_PAGES_TO_SEARCH and not found):
url = ("http://www.spa.gov.sa/ajax/listnews.php?sticky={}&cat=0&cabine"
"t={}&royal={}&lang=ar&pg={}".format(person, cabinet, royal, page))
try:
html = urlopen(url)
soup = BeautifulSoup(html, "html.parser")
news = soup.find_all("a", class_="aNewsTitle")
for item in news:
item_parsed = parse_news(item)
if item_parsed[0] <= str(last_id):
found = True
break
all_news.append(item_parsed)
except SocketError as e:
if e.errno != errno.ECONNRESET:
raise
pass
page = page + 1
return all_news
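# Illustrative usage sketch (assumes network access to spa.gov.sa); person 1 is
# the king per the docstring above. `_demo_latest_news` is a hypothetical helper.
def _demo_latest_news():
    for news_id, title, url in retrieve_news(person=1):
        print(news_id, title, url)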
def retrieve_detail(item):
'''Retrieve detail for a news item
returns a tuple (id, title, url, text)
'''
url = item[2]
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
detail = soup.find(class_='divNewsDetailsText')
detail = detail.get_text()
_list = list(item)
_list.insert(3, detail)
item = tuple(_list)
return item
def royal_order(last_id=-1):
'''Retrieve royal orders
if last_id is not defined it will return the max
return list of royal orders tuples up to MAX_PAGES_TO_SEARCH (page=10)
[(id, title, url, text)...]
'''
orders = []
_news = retrieve_news(royal=1, last_id=last_id)
for item in _news:
_detail = retrieve_detail(item)
orders.append(_detail)
return orders
def cabinet_decision(last_id=-1):
'''Retrieve cabinet decisions
if last_id is not defined it will return the max
return list of cabinet decisions tuples up to MAX_PAGES_TO_SEARCH (page=10)
[(id, title, url, text)...]
'''
decisions = []
_news = retrieve_news(cabinet=1, last_id=last_id)
for item in _news:
_detail = retrieve_detail(item)
decisions.append(_detail)
return decisions
def arrival_news(person, last_id=-1):
'''Retrieve only arrival news for person
if last_id is not defined it will return the max
return list of arrival news tuples up to MAX_PAGES_TO_SEARCH (page = 10 news)
[(id, title, url, location)...]
'''
arrival_news = []
all_news = retrieve_news(person=person, last_id=last_id)
for item in all_news:
if 'يصل إلى' in item[1]:
_list = list(item)
_list.insert(3, (item[1].split('يصل إلى'))[1].split('قادماً من')[0])
item = tuple(_list)
arrival_news.append(item)
return arrival_news
def leave_news(person, last_id=-1):
'''Retrieve only leave news for person
if last_id is not defined it will return the max
return list of leave news tuples up to MAX_PAGES_TO_SEARCH (page = 10 news)
[(id, title, url, locationFromTo)...]
'''
leave_news = []
all_news = retrieve_news(person=person, last_id=last_id)
for item in all_news:
if 'يغادر' in item[1]:
_list = list(item)
_list.insert(3, item[1].split('يغادر')[1])
item = tuple(_list)
leave_news.append(item)
return leave_news
if __name__ == "__main__":
# just for testing
news = cabinet_decision()
print(news)
|
saudisproject/saudi-bots
|
bots/spa.py
|
Python
|
gpl-3.0
| 4,254 | 0.002367 |
nothing = '90052'
while True:
f = open('channel/' + nothing + '.txt', 'r')
line = f.readline()
splits = line.split('Next nothing is ', 1)
if(len(splits) == 2):
nothing = splits[1]
print nothing
else:
break
|
cjwfuller/python-challenge
|
level6.py
|
Python
|
mit
| 250 | 0 |
"""
In the Core module you can find all basic classes and functions which form the backbone of the toolbox.
"""
import warnings
import numbers
import numpy as np
import numpy.ma as ma
import collections
from copy import copy, deepcopy
from numbers import Number
from scipy import integrate
from scipy.linalg import block_diag
from scipy.optimize import root
from scipy.interpolate import interp1d, interp2d, RectBivariateSpline, RegularGridInterpolator
from .registry import get_base
__all__ = ["Domain", "EvalData", "Parameters",
"Base", "BaseFraction", "StackedBase",
"Function", "ConstantFunction", "ComposedFunctionVector",
"find_roots", "sanitize_input", "real", "dot_product_l2",
"normalize_base", "project_on_base", "change_projection_base",
"back_project_from_base",
"calculate_scalar_product_matrix",
"calculate_base_transformation_matrix",
"calculate_expanded_base_transformation_matrix",
]
def sanitize_input(input_object, allowed_type):
"""
Sanitizes input data by testing if *input_object* is an array of type *allowed_type*.
Args:
input_object: Object which is to be checked.
allowed_type: desired type
Return:
input_object
"""
input_object = np.atleast_1d(input_object)
for obj in np.nditer(input_object, flags=["refs_ok"]):
if not isinstance(obj.item(), allowed_type):
raise TypeError("Only objects of type: {0} accepted.".format(allowed_type))
return input_object
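# For example, sanitize_input((1.0, 2.0), Number) returns array([1., 2.]),
# while a non-numeric element such as a string raises TypeError.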
class BaseFraction:
"""
Abstract base class representing a basis that can be used to describe functions of several variables.
"""
def __init__(self, members):
self.members = members
def scalar_product_hint(self):
"""
Empty Hint that can return steps for scalar product calculation.
Note:
Overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`
"""
pass
def function_space_hint(self):
"""
Empty Hint that can return properties which uniquely define
the function space of the :py:class:`.BaseFraction`.
Note:
Overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`.
"""
pass
def derive(self, order):
"""
Basic implementation of derive function.
Empty implementation, overwrite to use this functionality.
For an example implementation see :py:class:`.Function`
Args:
order (:class:`numbers.Number`): derivative order
Return:
:py:class:`.BaseFraction`: derived object
"""
if order == 0:
return self
else:
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def scale(self, factor):
"""
Factory method to obtain instances of this base fraction, scaled by the
given factor. Empty function, overwrite to implement custom
functionality. For an example implementation see :py:class:`.Function`.
Args:
factor: Factor to scale the vector.
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def raise_to(self, power):
"""
Raises this fraction to the given *power*.
Args:
power (:obj:`numbers.Number`): power to raise the fraction onto
Return:
raised fraction
"""
if power == 1:
return self
else:
raise NotImplementedError("Implement this functionality to make use of it.")
def get_member(self, idx):
"""
Getter function to access members.
Empty function, overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`
Note:
Empty function, overwrite to implement custom functionality.
Args:
idx: member index
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def __call__(self, *args, **kwargs):
"""
Spatial evaluation of the base fraction.
Args:
*args: Positional arguments.
**kwargs: Keyword arguments.
Returns:
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def add_neutral_element(self):
"""
Return the neutral element of addition for this object.
In other words: `self + ret_val == self`.
"""
raise NotImplementedError()
def mul_neutral_element(self):
"""
Return the neutral element of multiplication for this object.
In other words: `self * ret_val == self`.
"""
raise NotImplementedError()
def evaluation_hint(self, values):
"""
If evaluation can be accelerated by using special properties of a function, this function can be
overwritten to perform that computation. It gets passed an array of places where the caller
wants to evaluate the function and should return an array of the same length, containing the results.
Note:
This implementation just calls the normal evaluation hook.
Args:
values: places to be evaluated at
Returns:
numpy.ndarray: Evaluation results.
"""
return self(values)
class Function(BaseFraction):
"""
Most common instance of a :py:class:`.BaseFraction`.
This class handles all tasks concerning derivation and evaluation of
functions. It is used broadly across the toolbox and therefore incorporates
some very specific attributes. For example, to ensure the accuracy of
numerical handling, functions may only be evaluated in areas where they
provide nonzero return values. Also their domain has to be taken into account.
Therefore the attributes *domain* and *nonzero* are provided.
To save implementation time, ready to go version like
:py:class:`.LagrangeFirstOrder` are provided in the
:py:mod:`pyinduct.simulation` module.
For the implementation of new shape functions subclass this implementation
or directly provide a callable *eval_handle* and callable
*derivative_handles* if spatial derivatives are required for the
application.
Args:
eval_handle (callable): Callable object that can be evaluated.
domain((list of) tuples): Domain on which the eval_handle is defined.
nonzero(tuple): Region in which the eval_handle will return
nonzero output. Must be a subset of *domain*
derivative_handles (list): List of callable(s) that contain
derivatives of eval_handle
"""
# TODO: overload add and mul operators
def __init__(self, eval_handle, domain=(-np.inf, np.inf), nonzero=(-np.inf, np.inf), derivative_handles=None):
super().__init__(None)
self._vectorial = False
self._function_handle = None
self._derivative_handles = None
self.domain = set()
self.nonzero = set()
for kw, val in zip(["domain", "nonzero"], [domain, nonzero]):
if not isinstance(val, set):
if isinstance(val, tuple):
val = {val}
else:
raise TypeError("(Set of) or tuple(s) has to be provided "
"for {0}".format(kw))
setattr(self, kw, domain_simplification(val))
self.function_handle = eval_handle
self.derivative_handles = derivative_handles
@property
def derivative_handles(self):
return self._derivative_handles
@derivative_handles.setter
def derivative_handles(self, eval_handle_derivatives):
if eval_handle_derivatives is None:
eval_handle_derivatives = []
if not isinstance(eval_handle_derivatives, collections.abc.Iterable):
eval_handle_derivatives = [eval_handle_derivatives]
for der_handle in eval_handle_derivatives:
            if not isinstance(der_handle, collections.abc.Callable):
raise TypeError("derivative_handles must be callable")
self._derivative_handles = eval_handle_derivatives
@property
def function_handle(self):
return self._function_handle
@function_handle.setter
def function_handle(self, eval_handle):
# handle must be callable
        if not isinstance(eval_handle, collections.abc.Callable):
raise TypeError("callable has to be provided as function_handle")
# handle must return scalar when called with scalar
test_value = next(iter(self.domain))[1]
if test_value is np.inf:
test_value = 1
        if not isinstance(eval_handle(test_value), Number):
            raise TypeError("callable must return a number when called with a "
                            "scalar, got {} for argument {}".format(
                                type(eval_handle(test_value)), test_value))
self._function_handle = eval_handle
# test vectorial input
test_data = np.array([test_value] * 10)
try:
res = eval_handle(test_data)
except BaseException as e:
# looks like the function does _not_ handle vectorial input
self._vectorial = False
return
if not isinstance(res, np.ndarray):
# raise TypeError("callable must return np.ndarray when called with vector")
self._vectorial = False
return
if res.shape != test_data.shape:
# raise TypeError("result of call with vector must be of same shape")
self._vectorial = False
return
self._vectorial = True
def _check_domain(self, values):
"""
Checks if values fit into domain.
Args:
values (array_like): Point(s) where function shall be evaluated.
Raises:
ValueError: If values exceed the domain.
"""
values = np.atleast_1d(values)
if values.dtype == complex:
raise TypeError("Only real valued arguments considered for "
"pyinduct function. \nProvide value: {}\n"
"Data type: {}".format(values, values.dtype))
mask = np.full(len(values), False)
for interval in self.domain:
d_mask = np.logical_and(values >= interval[0],
values <= interval[1])
np.logical_or(mask, d_mask, out=mask)
if not all(mask):
raise ValueError("Function evaluated outside it's domain {} with {}"
"".format(self.domain,
values[np.logical_not(mask)]))
def __call__(self, argument):
"""
Handle that is used to evaluate the function on a given point.
Args:
argument: Function parameter
Return:
function value
"""
self._check_domain(argument)
if self._vectorial:
if not isinstance(argument, np.ndarray):
# a little convenience helper here
argument = np.array(argument)
return self._function_handle(argument)
else:
try:
ret_val = []
for arg in argument:
ret_val.append(self._function_handle(arg))
return np.array(ret_val)
except TypeError as e:
return self._function_handle(argument)
def get_member(self, idx):
"""
Implementation of the abstract parent method.
Since the :py:class:`.Function` has only one member (itself) the
parameter *idx* is ignored and *self* is returned.
Args:
idx: ignored.
Return:
self
"""
return self
def raise_to(self, power):
"""
Raises the function to the given *power*.
Warning:
Derivatives are lost after this action is performed.
Args:
power (:obj:`numbers.Number`): power to raise the function to
Return:
raised function
"""
if power == 1:
return self
def raise_factory(func):
def _raised_func(z):
return np.power(func(z), power)
return _raised_func
new_obj = deepcopy(self)
new_obj.derivative_handles = None
new_obj.function_handle = raise_factory(self.function_handle)
return new_obj
def scale(self, factor):
"""
Factory method to scale a :py:class:`.Function`.
Args:
factor : :obj:`numbers.Number` or a callable.
"""
if factor == 1:
return self
def scale_factory(func):
def _scaled_func(z):
                if isinstance(factor, collections.abc.Callable):
return factor(z) * func(z)
else:
return factor * func(z)
return _scaled_func
new_obj = deepcopy(self)
        if isinstance(factor, collections.abc.Callable):
# derivatives are lost
new_obj.derivative_handles = None
new_obj.function_handle = scale_factory(self._function_handle)
else:
# derivatives can be scaled
new_obj.derivative_handles = [scale_factory(der_handle) for der_handle in self.derivative_handles]
new_obj.function_handle = scale_factory(self._function_handle)
return new_obj
def derive(self, order=1):
r"""
Spatially derive this :py:class:`.Function`.
        This is done by neglecting the first *order* derivative handles and
        selecting handle :math:`\text{order} - 1` as the new evaluation handle.
Args:
order (int): the amount of derivations to perform
Raises:
TypeError: If *order* is not of type int.
ValueError: If the requested derivative order is higher than the
provided one.
Returns:
:py:class:`.Function` the derived function.
"""
if not isinstance(order, int):
raise TypeError("only integer allowed as derivation order")
if order == 0:
return self
if order < 0 or order > len(self.derivative_handles):
raise ValueError("function cannot be differentiated that often.")
new_obj = deepcopy(self)
new_obj.derivative_handles = self.derivative_handles[order - 1:]
new_obj.function_handle = new_obj.derivative_handles.pop(0)
return new_obj
def scalar_product_hint(self):
"""
        Return the hint that :py:func:`.dot_product_l2` has to be
        calculated to obtain the scalar product.
"""
return dot_product_l2
def function_space_hint(self):
"""
        Return the hint that this function is an element of the
        scalar product space which is uniquely defined by
        the scalar product :py:meth:`.scalar_product_hint`.
Note:
If you are working on different function spaces, you have
to overwrite this hint in order to provide more properties
which characterize your specific function space. For
example the domain of the functions.
"""
return self.scalar_product_hint(), self.domain
@staticmethod
def from_data(x, y, **kwargs):
"""
Create a :py:class:`.Function` based on discrete data by
interpolating.
The interpolation is done by using :py:class:`interp1d` from scipy,
the *kwargs* will be passed.
Args:
            x (array-like): Places where the function has been evaluated.
            y (array-like): Function values at *x*.
            **kwargs: ``domain``, ``nonzero`` and ``derivative_handles`` are
                passed to :py:class:`.Function`, all remaining kwargs are
                passed to :py:class:`interp1d`.
Returns:
:py:class:`.Function`: An interpolating function.
"""
dom = kwargs.pop("domain", (min(x), max(x)))
nonzero = kwargs.pop("nonzero", dom)
der_handles = kwargs.pop("derivative_handles", None)
interp = interp1d(x, y, **kwargs)
# TODO fix this behaviour
def wrapper(z):
res = interp(z)
if res.size == 1:
                return float(res)
return res
func = Function(eval_handle=wrapper,
domain=dom,
nonzero=nonzero,
derivative_handles=der_handles)
return func
def add_neutral_element(self):
return ConstantFunction(0, domain=self.domain)
def mul_neutral_element(self):
return ConstantFunction(1, domain=self.domain)
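# Illustrative sketch: wrapping a sine together with its first spatial
# derivative. The helper name `_example_function_usage` is hypothetical and
# only demonstrates typical usage of the class above.
def _example_function_usage():
    f = Function(np.sin, domain=(0, np.pi), derivative_handles=[np.cos])
    z = np.linspace(0, np.pi, 5)
    values = f(z)                 # vectorial evaluation of sin
    d_values = f.derive(1)(z)     # evaluation of the stored derivative (cos)
    return values, d_values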
class ConstantFunction(Function):
"""
A :py:class:`.Function` that returns a constant value.
This function can be differentiated without limits.
Args:
constant (number): value to return
Keyword Args:
**kwargs: All other kwargs get passed to :py:class:`.Function`.
"""
def __init__(self, constant, **kwargs):
self._constant = constant
func_kwargs = dict(eval_handle=self._constant_function_handle)
if "nonzero" in kwargs:
if constant == 0:
if kwargs["nonzero"] != set():
raise ValueError("Constant Function with constant 0 must have an"
" empty set nonzero area.")
if "domain" in kwargs:
if kwargs["nonzero"] != kwargs["domain"]:
raise ValueError(
"Constant Function is expected to be constant on the complete "
"domain. Nonzero argument is prohibited")
else:
func_kwargs["domain"] = kwargs["nonzero"]
func_kwargs["nonzero"] = kwargs["nonzero"]
else:
if "domain" in kwargs:
func_kwargs["domain"] = kwargs["domain"]
func_kwargs["nonzero"] = kwargs["domain"]
if constant == 0:
func_kwargs["nonzero"] = set()
if "derivative_handles" in kwargs:
warnings.warn(
"Derivative handles passed to ConstantFunction are discarded")
super().__init__( **func_kwargs)
def _constant_function_handle(self, z):
return self._constant * np.ones_like(z)
def derive(self, order=1):
if not isinstance(order, int):
raise TypeError("only integer allowed as derivation order")
if order == 0:
return self
if order < 0:
raise ValueError("only derivative order >= 0 supported")
zero_func = deepcopy(self)
zero_func._constant = 0
zero_func.nonzero = set()
return zero_func
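# Illustrative sketch: a ConstantFunction can be derived arbitrarily often,
# every derivative being the zero function on the same domain. The helper
# name `_example_constant_function` is hypothetical.
def _example_constant_function():
    c = ConstantFunction(2.5, domain=(0, 1))
    dc = c.derive(3)
    return c(0.5), dc(0.5)        # -> roughly (2.5, 0.0)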
class ComposedFunctionVector(BaseFraction):
r"""
Implementation of composite function vector :math:`\boldsymbol{x}`.
.. math::
\boldsymbol{x} = \begin{pmatrix}
x_1(z) \\
\vdots \\
x_n(z) \\
\xi_1 \\
\vdots \\
\xi_m \\
\end{pmatrix}
"""
def __init__(self, functions, scalars):
funcs = sanitize_input(functions, Function)
scals = sanitize_input(scalars, Number)
BaseFraction.__init__(self, {"funcs": funcs, "scalars": scals})
def __call__(self, arguments):
f_res = np.array([f(arguments) for f in self.members["funcs"]])
s_res = self.members["scalars"]
if f_res.ndim > 1:
s_res = s_res[:, None] * np.ones_like(f_res)
res = np.concatenate((f_res, s_res))
return res
def scalar_product_hint(self):
func_hints = [f.scalar_product_hint() for f in self.members["funcs"]]
scalar_hints = [dot_product for s in self.members["scalars"]]
return func_hints + scalar_hints
def function_space_hint(self):
"""
        Return the hint that this function vector is an element of the
        scalar product space which is uniquely defined by
* the scalar product
:py:meth:`.ComposedFunctionVector.scalar_product`
* :code:`len(self.members["funcs"])` functions
* and :code:`len(self.members["scalars"])` scalars.
"""
func_hints = [f.function_space_hint() for f in self.members["funcs"]]
scalar_hints = [dot_product for s in self.members["scalars"]]
return func_hints + scalar_hints
def get_member(self, idx):
if idx < len(self.members["funcs"]):
return self.members["funcs"][idx]
elif idx - len(self.members["funcs"]) < len(self.members["scalars"]):
return self.members["scalars"][idx - len(self.members["funcs"])]
else:
raise ValueError("wrong index")
def scale(self, factor):
if isinstance(factor, ComposedFunctionVector):
if not len(self.members["funcs"]) == len(factor.members["funcs"]):
raise ValueError
if not len(self.members["scalars"]) == len(factor.members["scalars"]):
raise ValueError
return self.__class__(np.array(
[func.scale(scale) for func, scale in
zip(self.members["funcs"], factor.members["funcs"])]),
[scalar * scale for scalar, scale in
zip(self.members["scalars"], factor.members["scalars"])],
)
elif isinstance(factor, Number):
return self.__class__(
np.array([func.scale(factor) for func in self.members["funcs"]]),
np.array([scal * factor for scal in self.members["scalars"]])
)
else:
raise TypeError("ComposedFunctionVector can only be scaled with "
"compatible ComposedFunctionVector of with a"
"constant scalar")
def mul_neutral_element(self):
"""
Create neutral element of multiplication that is compatible to this
object.
Returns: Comp. Function Vector with constant functions returning 1 and
scalars of value 1.
"""
funcs = [f.mul_neutral_element() for f in self.members["funcs"]]
scalar_constants = [1 for f in self.members["scalars"]]
neut = ComposedFunctionVector(funcs, scalar_constants)
return neut
def add_neutral_element(self):
"""
Create neutral element of addition that is compatible to this
object.
Returns: Comp. Function Vector with constant functions returning 0 and
scalars of value 0.
"""
funcs = [f.add_neutral_element() for f in self.members["funcs"]]
scalar_constants = [0 for f in self.members["scalars"]]
neut = ComposedFunctionVector(funcs, scalar_constants)
return neut
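# Illustrative sketch: combining one distributed part and one scalar part
# into a single state vector and scaling it. The helper name
# `_example_composed_vector` is hypothetical.
def _example_composed_vector():
    x = ComposedFunctionVector(Function(np.sin, domain=(0, 1)), [1.0])
    y = x.scale(2)                # scales the function and the scalar alike
    return x(0.5), y.get_member(1)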
class ConstantComposedFunctionVector(ComposedFunctionVector):
r"""
Constant composite function vector :math:`\boldsymbol{x}`.
.. math::
\boldsymbol{x} = \begin{pmatrix}
z \mapsto x_1(z) = c_1 \\
\vdots \\
z \mapsto x_n(z) = c_n \\
            d_1 \\
            \vdots \\
            d_m \\
\end{pmatrix}
Args:
func_constants (array-like): Constants for the functions.
scalar_constants (array-like): The scalar constants.
**func_kwargs: Keyword args that are passed to the ConstantFunction.
"""
def __init__(self, func_constants, scalar_constants, **func_kwargs):
func_constants = sanitize_input(func_constants, Number)
scalars = sanitize_input(scalar_constants, Number)
funcs = [ConstantFunction(c, **func_kwargs) for c in func_constants]
super().__init__(funcs, scalars)
class ApproximationBasis:
"""
Base class for an approximation basis.
An approximation basis is formed by some objects on which given distributed
variables may be projected.
"""
def scalar_product_hint(self):
"""
Hint that returns steps for scalar product calculation with elements of
this base.
Note:
Overwrite to implement custom functionality.
"""
raise NotImplementedError()
def function_space_hint(self):
"""
Hint that returns properties that characterize the functional
space of the fractions.
It can be used to determine if function spaces match.
Note:
Overwrite to implement custom functionality.
"""
raise NotImplementedError()
def is_compatible_to(self, other):
"""
        Helper function that checks compatibility between two approximation
bases.
In this case compatibility is given if the two bases live in the same
function space.
Args:
            other (:py:class:`.ApproximationBasis`): Approximation basis to
compare with.
Returns: True if bases match, False if they do not.
"""
return self.function_space_hint() == other.function_space_hint()
class Base(ApproximationBasis):
"""
Base class for approximation bases.
    In general, a :py:class:`.Base` is formed by a certain number of
    :py:class:`.BaseFraction` s and therefore spans a finite-dimensional
    subspace of the distributed problem's domain. Most of the time, the user
    does not need to interact with this class.
Args:
fractions (iterable of :py:class:`.BaseFraction`): List, array or
dict of :py:class:`.BaseFraction`'s
matching_base_lbls (list of str): List of labels from exactly matching
bases, for which no transformation is necessary.
Useful for transformations from bases that 'live' in
different function spaces but evolve with the same time
dynamic/coefficients (e.g. modal bases).
intermediate_base_lbls (list of str): If it is certain that this base
instance will be asked (as destination base) to return a
transformation to a source base, whose implementation is
cumbersome, its label can be provided here. This will trigger the
            generation of the transformation using built-in features.
            The algorithm, implemented in :py:func:`.get_weight_transformation`,
is then called again with the intermediate base as destination base
and the 'old' source base. With this technique arbitrary long
transformation chains are possible, if the provided intermediate
bases again define intermediate bases.
"""
def __init__(self, fractions,
matching_base_lbls=None, intermediate_base_lbls=None):
fractions = sanitize_input(fractions, BaseFraction)
# check type
base_space = fractions[0].function_space_hint()
if not all([frac.function_space_hint() == base_space
for frac in fractions]):
raise ValueError("Provided fractions must be compatible!")
self.fractions = fractions
self.matching_base_lbls = matching_base_lbls
if self.matching_base_lbls is None:
self.matching_base_lbls = []
if isinstance(self.matching_base_lbls, str):
self.matching_base_lbls = [self.matching_base_lbls]
self.intermediate_base_lbls = intermediate_base_lbls
if self.intermediate_base_lbls is None:
self.intermediate_base_lbls = []
if isinstance(self.intermediate_base_lbls, str):
self.intermediate_base_lbls = [self.intermediate_base_lbls]
def __iter__(self):
return iter(self.fractions)
def __len__(self):
return len(self.fractions)
def __getitem__(self, item):
return self.fractions[item]
@staticmethod
def _transformation_factory(info, equivalent=False):
mat = calculate_expanded_base_transformation_matrix(info.src_base,
info.dst_base,
info.src_order,
info.dst_order,
use_eye=equivalent)
def handle(weights):
return np.dot(mat, weights)
return handle
def transformation_hint(self, info):
"""
        Method that provides information about how to transform weights from
        one :py:class:`.BaseFraction` into another.
        In detail, this function has to return a callable, which will take the
        weights of the source system and return the weights of the target
        system. It may have keyword arguments for other data which is required
        to perform the transformation. Information about these extra keyword
        arguments should be provided in form of a dictionary whose keys are
        keyword arguments of the returned transformation handle.
        Note:
            This implementation covers the most basic case, where the two
            :py:class:`.BaseFraction` s are of the same type. For any other
            case it will raise an exception. Overwrite this method in your
            implementation to support conversion between bases that differ
            from yours.
Args:
info: :py:class:`.TransformationInfo`
Raises:
NotImplementedError:
Returns:
Transformation handle
"""
if info.src_lbl == info.dst_lbl:
# trivial case
return self._transformation_factory(info, equivalent=True), None
# check for matching bases
match_cond_src = (self is info.src_base
and info.dst_lbl in self.matching_base_lbls)
match_cond_dst = (self is info.dst_base
and info.src_lbl in self.matching_base_lbls)
if match_cond_src or match_cond_dst:
# bases are a match
if len(info.dst_base) != len(info.src_base):
msg = "Base length mismatch: len({})={} != len({})={}"
raise ValueError(msg.format(info.src_lbl, len(info.src_base),
info.dst_lbl, len(info.dst_base)))
if info.src_order >= info.dst_order:
# forward weights
return self._transformation_factory(info, True), None
# check for compatible base
compat_cond_src = (self is info.src_base
and self.is_compatible_to(info.dst_base))
compat_cond_dst = (self is info.dst_base
and self.is_compatible_to(info.src_base))
if compat_cond_src or compat_cond_dst:
# bases are compatible, use standard approach
return self._transformation_factory(info), None
if self.intermediate_base_lbls is not None:
# try intermediate bases
for inter_lbl in self.intermediate_base_lbls:
trafo, hint = self._get_intermediate_transform(info, inter_lbl)
if trafo is not None:
return trafo, hint
# No Idea what to do.
return None, None
def _get_intermediate_transform(self, info, inter_lbl):
if self is info.src_base:
# build trafo from us to middleman
intermediate_info = get_transformation_info(
info.src_lbl, inter_lbl,
info.src_order, info.src_order
)
handle = get_weight_transformation(intermediate_info)
if info.dst_lbl == inter_lbl:
# middleman is the source -> we are finished
return handle, None
# create hint from middleman to dst
hint = get_transformation_info(
inter_lbl, info.dst_lbl,
info.src_order, info.dst_order
)
return handle, hint
if self is info.dst_base:
# build trafo from middleman to us
intermediate_info = get_transformation_info(
inter_lbl, info.dst_lbl,
info.src_order, info.dst_order
)
handle = get_weight_transformation(intermediate_info)
if info.src_lbl == inter_lbl:
# middleman is the source -> we are finished
return handle, None
# create hint from src to middleman
hint = get_transformation_info(
info.src_lbl, inter_lbl,
info.src_order, info.src_order
)
return handle, hint
# No Idea what to do.
return None, None
def scalar_product_hint(self):
"""
Hint that returns steps for scalar product calculation with elements of
this base.
Note:
Overwrite to implement custom functionality.
"""
return self.fractions[0].scalar_product_hint()
def function_space_hint(self):
"""
Hint that returns properties that characterize the functional
space of the fractions.
It can be used to determine if function spaces match.
Note:
Overwrite to implement custom functionality.
"""
return self.fractions[0].function_space_hint()
def derive(self, order):
"""
Basic implementation of derive function.
Empty implementation, overwrite to use this functionality.
Args:
order (:class:`numbers.Number`): derivative order
Return:
:py:class:`.Base`: derived object
"""
if order == 0:
return self
else:
return self.__class__([f.derive(order) for f in self.fractions])
def scale(self, factor):
"""
Factory method to obtain instances of this base, scaled by the given factor.
Args:
factor: factor or function to scale this base with.
"""
if factor == 1:
return self
else:
return self.__class__([f.scale(factor) for f in self.fractions])
def raise_to(self, power):
"""
Factory method to obtain instances of this base, raised by the given power.
Args:
power: power to raise the basis onto.
"""
if power == 1:
return self
else:
raise ValueError("This funcionality is deprecated.")
def get_attribute(self, attr):
"""
Retrieve an attribute from the fractions of the base.
Args:
attr(str): Attribute to query the fractions for.
Returns:
:py:class:`np.ndarray`: Array of ``len(fractions)`` holding the
attributes. With `None` entries if the attribute is missing.
"""
return np.array([getattr(frac, attr, None) for frac in self.fractions])
class StackedBase(ApproximationBasis):
"""
Implementation of a basis vector that is obtained by stacking different
bases onto each other. This typically occurs when the bases of coupled
systems are joined to create a unified system.
Args:
base_info (OrderedDict): Dictionary with `base_label` as keys and
dictionaries holding information about the bases as values.
            In detail, this information must contain:
- sys_name (str): Name of the system the base is associated with.
- order (int): Highest temporal derivative order with which the
base shall be represented in the stacked base.
- base (:py:class:`.ApproximationBase`): The actual basis.
"""
def __init__(self, base_info):
self.base_lbls = []
self.system_names = []
self.orders = []
self._bases = []
self._cum_frac_idxs = [0]
self._cum_weight_idxs = [0]
for lbl, info in base_info.items():
# public properties
self.base_lbls.append(lbl)
self.system_names.append(info["sys_name"])
order = info["order"]
self.orders.append(order)
base = info["base"]
# internal properties
self._bases.append(base)
self._cum_frac_idxs.append(self._cum_frac_idxs[-1] + len(base))
self._cum_weight_idxs.append(self._cum_weight_idxs[-1]
+ (order + 1) * len(base))
self.fractions = np.concatenate([b.fractions for b in self._bases])
self._size = self._cum_frac_idxs.pop(-1)
self._weight_size = self._cum_weight_idxs.pop(-1)
def scalar_product_hint(self):
return NotImplemented
def function_space_hint(self):
return hash(self)
def is_compatible_to(self, other):
return False
def scale(self, factor):
raise NotImplementedError("Stacked base should not be scaled.")
def transformation_hint(self, info):
"""
        If *info.src_lbl* is a member, just return it, using the correct
        derivative transformation, otherwise return `None`.
Args:
info (:py:class:`.TransformationInfo`): Information about the
requested transformation.
Return:
transformation handle
"""
if info.src_order != 0:
# this can be implemented but is not really meaningful
return None, None
# we only know how to get from a stacked base to one of our parts
if info.src_base != self:
return None, None
if info.dst_lbl not in self.base_lbls:
return None, None
# check maximum available derivative order
dst_idx = self.base_lbls.index(info.dst_lbl)
init_src_ord = self.orders[dst_idx]
if info.dst_order > init_src_ord:
return None, None
# get transform
trans_mat = calculate_expanded_base_transformation_matrix(
info.dst_base,
info.dst_base,
init_src_ord,
info.dst_order,
use_eye=True)
start_idx = self._cum_weight_idxs[dst_idx]
length = (init_src_ord + 1) * len(self._bases[dst_idx])
def selection_func(weights):
assert len(weights) == self._weight_size
return trans_mat @ weights[start_idx: start_idx + length]
return selection_func, None
def domain_simplification(domain):
"""
Simplify a domain, given by possibly overlapping subdomains.
Args:
domain (set): Set of tuples, defining the (start, end) points of the
subdomains.
Returns:
list: Simplified domain.
"""
new_dom = set()
temp_dom = list()
# sort sub domains
for idx, sub_dom in enumerate(domain):
if sub_dom[0] > sub_dom[1]:
temp_dom.append(sub_dom[::-1])
else:
temp_dom.append(sub_dom)
# look for overlapping sub domains
for s_idx, start_dom in enumerate(temp_dom):
candidates = []
for e_idx, end_dom in enumerate(temp_dom):
if s_idx == e_idx:
continue
if start_dom[0] > end_dom[0]:
# second one starts earlier, postpone
continue
if start_dom[1] > end_dom[0]:
# two domains overlap
candidates.append(e_idx)
if not candidates:
continue
greatest_idx = candidates[np.argmax([temp_dom[idx][1]
for idx in candidates])]
if start_dom[1] >= temp_dom[greatest_idx][1]:
# the second domain is a real sub set of the first one
# save only the first
new_dom.add(start_dom)
else:
# second one goes further -> join them
new_dom.add((start_dom[0], temp_dom[greatest_idx][1]))
if new_dom and new_dom != domain:
return domain_simplification(new_dom)
else:
return set(temp_dom)
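# Illustrative sketch: overlapping or reversed sub-domains are merged into a
# minimal set of intervals. The helper name `_example_domain_simplification`
# is hypothetical.
def _example_domain_simplification():
    merged = domain_simplification({(0, 2), (1, 3)})    # -> {(0, 3)}
    flipped = domain_simplification({(2, 0)})           # -> {(0, 2)}
    return merged, flipped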
def domain_intersection(first, second):
"""
Calculate intersection(s) of two domains.
Args:
first (set): (Set of) tuples defining the first domain.
second (set): (Set of) tuples defining the second domain.
Return:
set: Intersection given by (start, end) tuples.
"""
if isinstance(first, tuple):
first = [first]
if isinstance(first, set):
first = list(first)
if isinstance(second, tuple):
second = [second]
if isinstance(second, set):
second = list(second)
intersection = set()
first_idx = 0
second_idx = 0
last_first_idx = 0
last_second_idx = 0
last_first_upper = None
last_second_upper = None
while first_idx < len(first) and second_idx < len(second):
# TODO remove interval and boundary checking? should be done before
if last_first_upper is not None and first_idx is not last_first_idx:
if last_first_upper >= first[first_idx][0]:
raise ValueError("Intervals not ordered!")
if last_second_upper is not None and second_idx is not last_second_idx:
if last_second_upper >= second[second_idx][0]:
raise ValueError("Intervals not ordered!")
if first[first_idx][0] > first[first_idx][1]:
raise ValueError("Interval boundaries given in wrong order")
if second[second_idx][0] > second[second_idx][1]:
raise ValueError("Interval boundaries given in wrong order")
# backup for interval order check
last_first_idx = first_idx
last_second_idx = second_idx
last_first_upper = first[first_idx][1]
last_second_upper = second[second_idx][1]
# no common domain -> search
if second[second_idx][0] <= first[first_idx][0] <= second[second_idx][1]:
# common start found in 1st domain
start = first[first_idx][0]
elif first[first_idx][0] <= second[second_idx][0] <= first[first_idx][1]:
# common start found in 2nd domain
start = second[second_idx][0]
else:
# intervals have no intersection
first_idx += 1
continue
# add end
if first[first_idx][1] <= second[second_idx][1]:
end = first[first_idx][1]
first_idx += 1
else:
end = second[second_idx][1]
second_idx += 1
# complete domain found
if not np.isclose(start, end):
intersection.add((start, end))
return intersection
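# Illustrative sketch: intersecting two interval sets yields the common
# sub-intervals. The helper name `_example_domain_intersection` is
# hypothetical.
def _example_domain_intersection():
    common = domain_intersection([(0, 2), (3, 6)], [(1, 4)])
    return common                 # -> {(1, 2), (3, 4)}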
def integrate_function(func, interval):
"""
Numerically integrate a function on a given interval using
:func:`.complex_quadrature`.
Args:
func(callable): Function to integrate.
interval(list of tuples): List of (start, end) values of the intervals
to integrate on.
Return:
        tuple: (result of the integration, accumulated absolute error estimate
        of the quadrature).
"""
result = 0
err = 0
for area in interval:
res = complex_quadrature(func, area[0], area[1])
result += res[0]
err += res[1]
return np.real_if_close(result), err
def complex_quadrature(func, a, b, **kwargs):
"""
    Wraps the scipy.quadpack routines to handle complex valued functions.
Args:
func (callable): function
a (:obj:`numbers.Number`): lower limit
b (:obj:`numbers.Number`): upper limit
        **kwargs: Arbitrary keyword arguments for the desired scipy.quadpack
            routine.
    Return:
        tuple: (complex integral value, combined absolute error estimate).
"""
def real_func(x):
return np.real(func(x))
def imag_func(x):
return np.imag(func(x))
real_integral = integrate.quad(real_func, a, b, **kwargs)
imag_integral = integrate.quad(imag_func, a, b, **kwargs)
return (real_integral[0] + 1j * imag_integral[0],
real_integral[1] + imag_integral[1])
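# Illustrative sketch: integrating a complex valued function over (0, pi).
# The real part of exp(1j*z) integrates to 0, the imaginary part to 2.
# The helper name `_example_complex_quadrature` is hypothetical.
def _example_complex_quadrature():
    value, abs_err = complex_quadrature(lambda z: np.exp(1j * z), 0, np.pi)
    return value                  # -> approximately 0 + 2j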
def dot_product(first, second):
"""
Calculate the inner product of two vectors.
Args:
first (:obj:`numpy.ndarray`): first vector
second (:obj:`numpy.ndarray`): second vector
Return:
inner product
"""
return np.inner(first, second)
def dot_product_l2(first, second):
r"""
Calculate the inner product on L2.
    Given two functions :math:`\varphi(z)` and :math:`\psi(z)`, this function
    calculates
.. math::
\left< \varphi(z) | \psi(z) \right> =
\int\limits_{\Gamma_0}^{\Gamma_1}
\bar\varphi(\zeta) \psi(\zeta) \,\textup{d}\zeta \:.
Args:
first (:py:class:`.Function`): first function
second (:py:class:`.Function`): second function
Return:
inner product
"""
if not isinstance(first, Function) or not isinstance(second, Function):
raise TypeError("Wrong type(s) supplied. both must be a {0}".format(Function))
if not first.domain == second.domain:
raise ValueError("Domains of arguments must be identical, "
"but {} and {} were given".format(first.domain,
second.domain))
nonzero = domain_intersection(first.nonzero, second.nonzero)
areas = domain_intersection(first.domain, nonzero)
# try some shortcuts
if first == second:
if hasattr(first, "quad_int"):
return first.quad_int()
if 0:
# TODO let Function Class handle product to gain more speed
if type(first) is type(second):
pass
# standard case
def func(z):
"""
Take the complex conjugate of the first element and multiply it
by the second.
"""
return np.conj(first(z)) * second(z)
result, error = integrate_function(func, areas)
return result
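# Illustrative sketch: sin and cos are orthogonal on (0, pi) in the L2 sense,
# while <sin, sin> evaluates to pi/2. The helper name
# `_example_dot_product_l2` is hypothetical.
def _example_dot_product_l2():
    phi = Function(np.sin, domain=(0, np.pi), nonzero=(0, np.pi))
    psi = Function(np.cos, domain=(0, np.pi), nonzero=(0, np.pi))
    return dot_product_l2(phi, psi), dot_product_l2(phi, phi)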
def vectorize_scalar_product(first, second, scalar_product):
r"""
Call the given :code:`scalar_product` in a loop for the arguments
    in :code:`first` and :code:`second`.
Given two vectors of functions
.. math::
\boldsymbol{\varphi}(z)
= \left(\varphi_0(z), \dotsc, \varphi_N(z)\right)^T
and
.. math::
        \boldsymbol{\psi}(z) = \left(\psi_0(z), \dotsc, \psi_N(z)\right)^T,
this function computes
:math:`\left< \boldsymbol{\varphi}(z) | \boldsymbol{\psi}(z) \right>_{L2}`
where
.. math::
\left< \varphi_i(z) | \psi_j(z) \right>_{L2} =
\int\limits_{\Gamma_0}^{\Gamma_1}
\bar\varphi_i(\zeta) \psi_j(\zeta) \,\textup{d}\zeta \:.
Herein, :math:`\bar\varphi_i(\zeta)` denotes the complex conjugate and
:math:`\Gamma_0` as well as :math:`\Gamma_1` are derived by computing the
intersection of the nonzero areas of the involved functions.
Args:
        first (callable or :obj:`numpy.ndarray`): (1d array of n) callable(s)
        second (callable or :obj:`numpy.ndarray`): (1d array of n) callable(s)
        scalar_product (callable or tuple of callables): The scalar product(s)
            to evaluate for every pair of members.
Raises:
ValueError, if the provided arrays are not equally long.
Return:
numpy.ndarray: Array of inner products
"""
# sanitize input
first = np.atleast_1d(first)
second = np.atleast_1d(second)
try:
iter(scalar_product)
except TypeError:
scalar_product = (scalar_product, )
if len(first) != len(second):
raise ValueError("Provided function vectors must be of same length.")
# calculate output size and allocate output
out = np.zeros(first.shape, dtype=complex)
# TODO propagate vectorization into _dot_product_l2 to save this loop
# loop over entries
for idx, (f, s) in enumerate(zip(first, second)):
for m_idx, scal_prod in enumerate(scalar_product):
out[idx] += scal_prod(f.get_member(m_idx), s.get_member(m_idx))
return np.real_if_close(out)
def calculate_scalar_matrix(values_a, values_b):
"""
    Convenience version of :py:func:`.calculate_scalar_product_matrix` with
    :py:func:`numpy.multiply` hardcoded as *scalar_product*.
Args:
values_a (numbers.Number or numpy.ndarray): (array of) value(s) for rows
values_b (numbers.Number or numpy.ndarray): (array of) value(s) for columns
Return:
numpy.ndarray: Matrix containing the pairwise products of the elements from *values_a* and *values_b*.
"""
return calculate_scalar_product_matrix(sanitize_input(values_a, Number),
sanitize_input(values_b, Number),
np.multiply)
def calculate_scalar_product_matrix(base_a, base_b, scalar_product=None,
optimize=True):
r"""
Calculates a matrix :math:`A` , whose elements are the scalar products of
each element from *base_a* and *base_b*, so that
:math:`a_{ij} = \langle \mathrm{a}_i\,,\: \mathrm{b}_j\rangle`.
Args:
base_a (:py:class:`.ApproximationBase`): Basis a
base_b (:py:class:`.ApproximationBase`): Basis b
scalar_product: (List of) function objects that are passed the members
of the given bases as pairs. Defaults to the scalar product given by
`base_a`
optimize (bool): Switch to turn on the symmetry based speed up.
For development purposes only.
Return:
numpy.ndarray: matrix :math:`A`
"""
if not base_a.is_compatible_to(base_b):
raise TypeError("Bases must be from the same type.")
if scalar_product is None:
scalar_product = base_a.scalar_product_hint()
fractions_a = base_a.fractions
fractions_b = base_b.fractions
if optimize and base_a == base_b:
        # since the scalar_product commutes we can save some operations
dim = fractions_a.shape[0]
        output = np.zeros((dim, dim), dtype=complex)
i, j = np.mgrid[0:dim, 0:dim]
# compute only upper half
upper_idxs = np.triu_indices(dim)
i_upper = i[upper_idxs]
j_upper = j[upper_idxs]
output[upper_idxs] = vectorize_scalar_product(fractions_a[i_upper],
fractions_a[j_upper],
scalar_product)
# reconstruct using symmetry
output += np.conjugate(np.triu(output, 1)).T
return np.real_if_close(output)
else:
i, j = np.mgrid[0:fractions_a.shape[0],
0:fractions_b.shape[0]]
fractions_i = fractions_a[i]
fractions_j = fractions_b[j]
res = vectorize_scalar_product(fractions_i.flatten(),
fractions_j.flatten(),
scalar_product)
return res.reshape(fractions_i.shape)
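# Illustrative sketch: the Gram matrix of a small trigonometric base is
# (approximately) diagonal, since its fractions are mutually orthogonal on
# (0, pi). The helper name `_example_gram_matrix` is hypothetical.
def _example_gram_matrix():
    base = Base([Function(np.sin, domain=(0, np.pi)),
                 Function(np.cos, domain=(0, np.pi))])
    return calculate_scalar_product_matrix(base, base)  # approx. diag(pi/2)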
def project_on_base(state, base):
"""
Projects a *state* on a basis given by *base*.
Args:
state (array_like): List of functions to approximate.
base (:py:class:`.ApproximationBase`): Basis to project onto.
Return:
numpy.ndarray: Weight vector in the given *base*
"""
if not isinstance(base, ApproximationBasis):
raise TypeError("Projection only possible on approximation bases.")
# compute <x(z, t), phi_i(z)> (vector)
projections = calculate_scalar_product_matrix(base.__class__(state),
base).squeeze()
# compute <phi_i(z), phi_j(z)> for 0 < i, j < n (matrix)
scale_mat = calculate_scalar_product_matrix(base, base)
res = np.linalg.inv(scale_mat) @ projections
return np.reshape(res, (scale_mat.shape[0],))
def project_on_bases(states, canonical_equations):
"""
Convenience wrapper for :py:func:`.project_on_base`.
Calculate the state, assuming it will be constituted by the dominant
    base of the respective system. The keys of *states* must match the
    names of the given *canonical_equations*.
Args:
states: Dictionary with a list of functions as values.
canonical_equations: List of :py:class:`.CanonicalEquation` instances.
Returns:
numpy.array: Finite dimensional state as 1d-array corresponding to the
concatenated dominant bases from *canonical_equations*.
"""
q0 = np.array([])
for ce in canonical_equations:
lbl = ce.dominant_lbl
q0 = np.hstack(tuple([q0] + [project_on_base(state, get_base(lbl))
for state in states[ce.name]]))
return q0
def back_project_from_base(weights, base):
"""
    Build evaluation handle for a distributed variable that was approximated
    as a set of *weights* on a certain *base*.
Args:
weights (numpy.ndarray): Weight vector.
base (:py:class:`.ApproximationBase`): Base to be used for the projection.
Return:
evaluation handle
"""
if isinstance(weights, Number):
weights = np.asarray([weights])
if weights.shape[0] != base.fractions.shape[0]:
raise ValueError("Lengths of weights and initial_initial_functions "
"do not match!")
def eval_handle(z):
res = sum([weights[i] * base.fractions[i](z)
for i in range(weights.shape[0])])
return real(res)
return eval_handle
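# Illustrative sketch: project x(z) = z onto the small trigonometric base and
# rebuild an evaluation handle from the obtained weights. The helper name
# `_example_projection` is hypothetical.
def _example_projection():
    base = Base([Function(np.sin, domain=(0, np.pi)),
                 Function(np.cos, domain=(0, np.pi))])
    target = Function(lambda z: z, domain=(0, np.pi))
    weights = project_on_base(target, base)
    x_approx = back_project_from_base(weights, base)
    return weights, x_approx(np.pi / 2)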
def change_projection_base(src_weights, src_base, dst_base):
"""
Converts given weights that form an approximation using *src_base*
to the best possible fit using *dst_base*.
Args:
src_weights (numpy.ndarray): Vector of numbers.
src_base (:py:class:`.ApproximationBase`): The source Basis.
dst_base (:py:class:`.ApproximationBase`): The destination Basis.
Return:
:obj:`numpy.ndarray`: target weights
"""
pro_mat = calculate_base_transformation_matrix(src_base, dst_base)
return project_weights(pro_mat, src_weights)
def project_weights(projection_matrix, src_weights):
"""
Project *src_weights* on new basis using the provided *projection_matrix*.
Args:
projection_matrix (:py:class:`numpy.ndarray`): projection between
the source and the target basis;
dimension (m, n)
src_weights (:py:class:`numpy.ndarray`): weights in the source basis;
dimension (1, m)
Return:
:py:class:`numpy.ndarray`: weights in the target basis;
dimension (1, n)
"""
src_weights = sanitize_input(src_weights, Number)
return np.dot(projection_matrix, src_weights)
class TransformationInfo:
"""
Structure that holds information about transformations between different
bases.
    This class serves as an easy-to-use structure to aggregate information
    describing transformations between different
    :py:class:`.BaseFraction` s. It can be tested for equality to check the
    equality of transformations and is hashable,
    which makes it usable as a dictionary key to cache different
    transformations.
Attributes:
src_lbl(str): label of source basis
        dst_lbl(str): label of destination basis
src_base(:obj:`numpy.ndarray`): source basis in form of an array of
the source Fractions
dst_base(:obj:`numpy.ndarray`): destination basis in form of an
array of the destination Fractions
src_order: available temporal derivative order of source weights
dst_order: needed temporal derivative order for destination weights
"""
def __init__(self):
self.src_lbl = None
self.dst_lbl = None
self.src_base = None
self.dst_base = None
self.src_order = None
self.dst_order = None
def as_tuple(self):
return self.src_lbl, self.dst_lbl, self.src_order, self.dst_order
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
if not isinstance(other, TransformationInfo):
raise TypeError("Unknown type to compare with")
return self.as_tuple() == other.as_tuple()
def mirror(self):
"""
Factory method, that creates a new TransformationInfo object by
mirroring *src* and *dst* terms.
This helps handling requests to different bases.
"""
new_info = TransformationInfo()
new_info.src_lbl = self.dst_lbl
new_info.src_base = self.dst_base
new_info.src_order = self.dst_order
new_info.dst_lbl = self.src_lbl
new_info.dst_base = self.src_base
new_info.dst_order = self.src_order
return new_info
def get_weight_transformation(info):
"""
Create a handle that will transform weights from *info.src_base* into
    weights for *info.dst_base* while paying respect to the given derivative
orders.
This is accomplished by recursively iterating through source and
    destination bases and evaluating their :py:meth:`.transformation_hint` methods.
Args:
info(:py:class:`.TransformationInfo`): information about the requested
transformation.
Return:
callable: transformation function handle
"""
# TODO since this lives in core now, get rid of base labels
# try to get help from the destination base
handle, hint = info.dst_base.transformation_hint(info)
if handle is None:
# try source instead
handle, hint = info.src_base.transformation_hint(info)
if handle is None:
raise TypeError(
("get_weight_transformation(): \n"
"You requested information about how to transform to '{0}'({1}) \n"
"from '{3}'({4}), furthermore the source derivative order is \n"
"{2} and the target one is {5}. No transformation could be \n"
"found, remember to implement your own 'transformation_hint' \n"
"method for non-standard bases.").format(
info.dst_lbl,
info.dst_base.__class__.__name__,
info.dst_order,
info.src_lbl,
info.src_base.__class__.__name__,
info.src_order,
))
# check termination criterion
if hint is None:
# direct transformation possible
return handle
kwargs = {}
new_handle = None
if hasattr(hint, "extras"):
# try to gain transformations that will satisfy the extra terms
for dep_lbl, dep_order in hint.extras.items():
new_info = copy(info)
new_info.dst_lbl = dep_lbl
new_info.dst_base = get_base(dep_lbl)
new_info.dst_order = dep_order
dep_handle = get_weight_transformation(new_info)
kwargs[dep_lbl] = dep_handle
if hint.src_lbl is not None:
# transformation to assistant system required
new_handle = get_weight_transformation(hint)
def last_handle(weights):
if new_handle:
return handle(new_handle(weights), **kwargs)
else:
return handle(weights, **kwargs)
return last_handle
def get_transformation_info(source_label, destination_label,
source_order=0, destination_order=0):
"""
    Assemble the :py:class:`.TransformationInfo` object that describes the
    weight transformation from one (source) base to another (destination) base.
    Args:
        source_label (str): Label of the source base.
        destination_label (str): Label of the destination base.
        source_order: Available temporal derivative order
            of the source weights.
        destination_order: Desired temporal derivative order
            of the destination weights.
Returns:
:py:class:`.TransformationInfo`: Transformation info object.
"""
info = TransformationInfo()
info.src_lbl = source_label
info.src_base = get_base(info.src_lbl)
info.src_order = source_order
info.dst_lbl = destination_label
info.dst_base = get_base(info.dst_lbl)
info.dst_order = destination_order
return info
def calculate_expanded_base_transformation_matrix(src_base, dst_base,
src_order, dst_order,
use_eye=False):
r"""
Constructs a transformation matrix :math:`\bar V` from basis given by
*src_base* to basis given by *dst_base* that also transforms all temporal
derivatives of the given weights.
See:
:py:func:`.calculate_base_transformation_matrix` for further details.
Args:
dst_base (:py:class:`.ApproximationBase`): New projection base.
src_base (:py:class:`.ApproximationBase`): Current projection base.
src_order: Temporal derivative order available in *src_base*.
dst_order: Temporal derivative order needed in *dst_base*.
use_eye (bool): Use identity as base transformation matrix.
(For easy selection of derivatives in the same base)
Raises:
ValueError: If destination needs a higher derivative order than source
can provide.
Return:
:obj:`numpy.ndarray`: Transformation matrix
"""
if src_order < dst_order:
raise ValueError(("higher 'dst_order'({0}) demanded than "
+ "'src_order'({1}) can provide for this strategy."
"").format(dst_order, src_order))
# build core transformation
if use_eye:
core_transformation = np.eye(src_base.fractions.size)
else:
core_transformation = calculate_base_transformation_matrix(src_base,
dst_base)
# build block matrix
part_transformation = block_diag(*[core_transformation
for i in range(dst_order + 1)])
complete_transformation = np.hstack([part_transformation]
+ [np.zeros((part_transformation.shape[0],
src_base.fractions.size))
for i in range(src_order - dst_order)])
return complete_transformation
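# Illustrative sketch: with `use_eye=True` the expanded transformation simply
# selects the requested temporal derivative order from a longer weight vector.
# The helper name `_example_expanded_transformation` is hypothetical.
def _example_expanded_transformation():
    base = Base([Function(np.sin, domain=(0, np.pi)),
                 Function(np.cos, domain=(0, np.pi))])
    mat = calculate_expanded_base_transformation_matrix(
        base, base, src_order=1, dst_order=0, use_eye=True)
    return mat                    # -> [[1, 0, 0, 0], [0, 1, 0, 0]]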
def calculate_base_transformation_matrix(src_base, dst_base, scalar_product=None):
"""
    Calculates the transformation matrix :math:`V`, so that a
    set of weights, describing a function in the
    *src_base*, will express the same function in the *dst_base*, while
    minimizing the reprojection error.
    A quadratic error is used as the error norm for this case.
Warning:
This method assumes that all members of the given bases have
the same type and that their
        :py:class:`.BaseFraction` s define compatible scalar products.
Raises:
TypeError: If given bases do not provide an
:py:func:`.scalar_product_hint` method.
Args:
src_base (:py:class:`.ApproximationBase`): Current projection base.
dst_base (:py:class:`.ApproximationBase`): New projection base.
scalar_product (list of callable): Callbacks for product calculation.
Defaults to `scalar_product_hint` from `src_base`.
Return:
:py:class:`numpy.ndarray`: Transformation matrix :math:`V` .
"""
if not src_base.is_compatible_to(dst_base):
raise TypeError("Source and destination base must be from the same "
"type.")
p_mat = calculate_scalar_product_matrix(dst_base, src_base, scalar_product)
q_mat = calculate_scalar_product_matrix(dst_base, dst_base, scalar_product)
# compute V matrix, where V = inv(Q)*P
v_mat = np.dot(np.linalg.inv(q_mat), p_mat)
return v_mat
def normalize_base(b1, b2=None):
r"""
    Takes two :py:class:`.ApproximationBase`'s :math:`\boldsymbol{b}_1` ,
    :math:`\boldsymbol{b}_2` and normalizes them so that
:math:`\langle\boldsymbol{b}_{1i}\,
,\:\boldsymbol{b}_{2i}\rangle = 1`.
If only one base is given, :math:`\boldsymbol{b}_2`
defaults to :math:`\boldsymbol{b}_1`.
Args:
b1 (:py:class:`.ApproximationBase`): :math:`\boldsymbol{b}_1`
b2 (:py:class:`.ApproximationBase`): :math:`\boldsymbol{b}_2`
Raises:
ValueError: If :math:`\boldsymbol{b}_1`
and :math:`\boldsymbol{b}_2` are orthogonal.
Return:
:py:class:`.ApproximationBase` : if *b2* is None,
otherwise: Tuple of 2 :py:class:`.ApproximationBase`'s.
"""
auto_normalization = False
if b2 is None:
auto_normalization = True
res = generic_scalar_product(b1, b2)
if any(res < np.finfo(float).eps):
if any(np.isclose(res, 0)):
raise ValueError("given base fractions are orthogonal. "
"no normalization possible.")
else:
raise ValueError("imaginary scale required. "
"no normalization possible.")
scale_factors = np.sqrt(1 / res)
b1_scaled = b1.__class__(
[frac.scale(factor)
for frac, factor in zip(b1.fractions, scale_factors)])
if auto_normalization:
return b1_scaled
else:
b2_scaled = b2.__class__(
[frac.scale(factor)
for frac, factor in zip(b2.fractions, scale_factors)])
return b1_scaled, b2_scaled
def generic_scalar_product(b1, b2=None, scalar_product=None):
"""
Calculates the pairwise scalar product between the elements
of the :py:class:`.ApproximationBase` *b1* and *b2*.
Args:
b1 (:py:class:`.ApproximationBase`): first basis
b2 (:py:class:`.ApproximationBase`): second basis, if omitted
defaults to *b1*
scalar_product (list of callable): Callbacks for product calculation.
Defaults to `scalar_product_hint` from `b1`.
Note:
If *b2* is omitted, the result can be used to normalize
*b1* in terms of its scalar product.
"""
if b2 is None:
b2 = b1
if type(b1) != type(b2):
raise TypeError("only arguments of same type allowed.")
if scalar_product is None:
scalar_product = b1.scalar_product_hint()
res = vectorize_scalar_product(b1, b2, scalar_product)
return np.real_if_close(res)
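# Illustrative sketch: after normalization every fraction has an L2 norm of
# one, so the pairwise scalar products evaluate to unity. The helper name
# `_example_normalize_base` is hypothetical.
def _example_normalize_base():
    base = Base([Function(np.sin, domain=(0, np.pi)),
                 Function(np.cos, domain=(0, np.pi))])
    norm_base = normalize_base(base)
    return generic_scalar_product(norm_base)    # -> approximately [1., 1.]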
def find_roots(function, grid, n_roots=None, rtol=1.e-5, atol=1.e-8,
cmplx=False, sort_mode="norm"):
r"""
Searches *n_roots* roots of the *function* :math:`f(\boldsymbol{x})`
on the given *grid* and checks them for uniqueness with aid of *rtol*.
    In detail, :py:func:`scipy.optimize.root` is used to find initial
    candidates for roots of :math:`f(\boldsymbol{x})` . If a root satisfies
    the criteria given by *atol* and *rtol* it is added. If it is already in
    the list, a comparison between the error of the already present entry and
    the current error is performed. If the newly calculated root comes with a
    smaller error, it supersedes the present entry.
Raises:
ValueError: If the demanded amount of roots can't be found.
Args:
        function (callable): Function handle for :math:`f(\boldsymbol{x})`
whose roots shall be found.
grid (list): Grid to use as starting point for root detection.
The :math:`i` th element of this list provides sample points
for the :math:`i` th parameter of :math:`\boldsymbol{x}` .
n_roots (int): Number of roots to find. If none is given, return
all roots that could be found in the given area.
rtol: Tolerance to be exceeded for the difference of two roots
to be unique: :math:`f(r1) - f(r2) > \textrm{rtol}` .
atol: Absolute tolerance to zero: :math:`f(x^0) < \textrm{atol}` .
cmplx(bool): Set to True if the given *function* is complex valued.
        sort_mode(str): Specify the order in which the extracted roots shall be
sorted. Default "norm" sorts entries by their :math:`l_2` norm,
while "component" will sort them in increasing order by every
component.
Return:
        numpy.ndarray: The roots that were found, sorted according to
        *sort_mode*.
"""
if isinstance(grid[0], Number):
grid = [grid]
dim = len(grid)
if cmplx:
assert dim == 2
function = complex_wrapper(function)
roots = []
errors = []
grids = np.meshgrid(*[row for row in grid])
values = np.vstack([arr.flatten() for arr in grids]).T
# iterate over test_values
val = iter(values)
while True:
try:
res = root(function, next(val), tol=atol)
except StopIteration:
break
if not res.success:
continue
calculated_root = np.atleast_1d(res.x)
error = np.linalg.norm(res.fun)
# check for absolute tolerance
if error > atol:
continue
# check if root lies in expected area
abort = False
for rt, ar in zip(calculated_root, grid):
if ar.min() - atol > rt or ar.max() + atol < rt:
abort = True
break
if abort:
continue
if roots:
# check whether root is already present in cache
cmp_arr = np.isclose(calculated_root, roots, atol=rtol)
cmp_vec = [all(elements) for elements in cmp_arr]
if any(cmp_vec):
idx = cmp_vec.index(True)
if errors[idx] > error:
roots[idx] = calculated_root
errors[idx] = error
# TODO check jacobian (if provided)
# to identify roots of higher order
continue
roots.append(calculated_root)
errors.append(error)
if n_roots is None:
n_roots = len(roots)
if n_roots == 0:
# Either no roots have been found or zero roots have been requested
return np.array([])
if len(roots) < n_roots:
raise ValueError("Insufficient number of roots detected. ({0} < {1}) "
"Check provided function (see `visualize_roots`) or "
"try to increase the search area.".format(
len(roots), n_roots))
valid_roots = np.array(roots)
# sort roots
if sort_mode == "norm":
# sort entries by their norm
idx = np.argsort(np.linalg.norm(valid_roots, axis=1))
sorted_roots = valid_roots[idx, :]
elif sort_mode == "component":
# completely sort first column before we start
idx = np.argsort(valid_roots[:, 0])
sorted_roots = valid_roots[idx, :]
for layer in range(valid_roots.shape[1] - 1):
for rt in sorted_roots[:, layer]:
eq_mask = np.isclose(sorted_roots[:, layer], rt, rtol=rtol)
idx = np.argsort(sorted_roots[eq_mask, layer + 1])
sorted_roots[eq_mask] = sorted_roots[eq_mask][idx, :]
else:
raise ValueError("Sort mode: {} not supported.".format(sort_mode))
good_roots = sorted_roots[:n_roots]
if cmplx:
return good_roots[:, 0] + 1j * good_roots[:, 1]
if dim == 1:
return good_roots.flatten()
return good_roots
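# Illustrative sketch: the first three positive roots of the sine function.
# The helper name `_example_find_roots` is hypothetical.
def _example_find_roots():
    grid = np.linspace(1, 10, 30)
    return find_roots(np.sin, grid, n_roots=3)  # -> approx. [pi, 2*pi, 3*pi]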
def complex_wrapper(func):
"""
Wraps complex valued functions into two-dimensional functions.
This enables the root-finding routine to handle it as a
vectorial function.
Args:
func (callable): Callable that returns a complex result.
Return:
two-dimensional, callable: function handle,
        taking x = (re(x), im(x)) and returning [re(func(x)), im(func(x))].
"""
def wrapper(x):
        val = func(complex(x[0], x[1]))
return np.array([np.real(val),
np.imag(val)])
return wrapper
class Parameters:
"""
Handy class to collect system parameters.
    This class can be instantiated with a dict, whose keys will then
    become attributes of the object.
(Bunch approach)
Args:
kwargs: parameters
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Domain(object):
"""
Helper class that manages ranges for data evaluation, containing
parameters.
Args:
bounds (tuple): Interval bounds.
num (int): Number of points in interval.
step (numbers.Number): Distance between points (if homogeneous).
points (array_like): Points themselves.
Note:
If num and step are given, num will take precedence.
"""
def __init__(self, bounds=None, num=None, step=None, points=None):
if points is not None:
# check for correct boundaries
if bounds and not all(bounds == points[[0, -1]]):
raise ValueError("Given 'bounds' don't fit the provided data.")
# check for correct length
if num is not None and len(points) != num:
raise ValueError("Given 'num' doesn't fit the provided data.")
# points are given, easy one
self._values = np.atleast_1d(points)
self._limits = (self._values.min(), self._values.max())
self._num = self._values.size
# check for evenly spaced entries
if self._num > 1:
steps = np.diff(self._values)
equal_steps = np.allclose(steps, steps[0])
if step:
if not equal_steps or step != steps[0]:
raise ValueError("Given 'step' doesn't fit the provided "
"data.")
else:
if equal_steps:
step = steps[0]
else:
step = np.nan
self._step = step
elif bounds and num:
self._limits = bounds
self._num = num
self._values, self._step = np.linspace(bounds[0],
bounds[1],
num,
retstep=True)
if step is not None and not np.isclose(self._step, step):
raise ValueError("could not satisfy both redundant "
"requirements for num and step!")
elif bounds and step:
self._limits = bounds
# calculate number of needed points but save correct step size
self._num = int((bounds[1] - bounds[0]) / step + 1.5)
self._values, self._step = np.linspace(bounds[0],
bounds[1],
self._num,
retstep=True)
if np.abs(step - self._step)/self._step > 1e-1:
warnings.warn("desired step-size {} doesn't fit to given "
"interval, changing to {}".format(step,
self._step))
else:
raise ValueError("not enough arguments provided!")
# mimic some ndarray properties
self.shape = self._values.shape
self.view = self._values.view
def __repr__(self):
return "Domain(bounds={}, step={}, num={})".format(self.bounds,
self._step,
self._num)
def __len__(self):
return len(self._values)
def __getitem__(self, item):
return self._values[item]
@property
def step(self):
return self._step
@property
def bounds(self):
return self._limits
@property
def points(self):
return self._values
@property
def ndim(self):
return self._values.ndim
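# Illustrative sketch: equivalent ways to define the same regular grid.
# The helper name `_example_domain` is hypothetical.
def _example_domain():
    d1 = Domain(bounds=(0, 1), num=11)          # 11 points, step 0.1
    d2 = Domain(bounds=(0, 1), step=.1)         # same grid, defined via step
    d3 = Domain(points=np.linspace(0, 1, 11))   # same grid from raw points
    return d1.step, len(d2), d3.bounds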
def real(data):
"""
Check if the imaginary part of :code:`data` vanishes
and return its real part if it does.
Args:
data (numbers.Number or array_like): Possibly complex data to check.
Raises:
ValueError: If provided data can't be converted within
the given tolerance limit.
Return:
numbers.Number or array_like: Real part of :code:`data`.
"""
candidates = np.real_if_close(data, tol=100)
if candidates.dtype == 'complex':
raise ValueError("Imaginary part does not vanish, "
+ "check for implementation errors.")
# TODO make numpy array to common data type (even for scalar values)
if candidates.size == 1:
return float(candidates)
return candidates
class EvalData:
"""
This class helps managing any kind of result data.
The data gained by evaluation of a function is stored together with the
corresponding points of its evaluation. This way all data needed for
plotting or other postprocessing is stored in one place.
Next to the points of the evaluation the names and units of the included
axes can be stored.
After initialization an interpolator is set up, so that one can interpolate
in the result data by using the overloaded :py:meth:`__call__` method.
Args:
input_data: (List of) array(s) holding the axes of a regular grid on
which the evaluation took place.
output_data: The result of the evaluation.
Keyword Args:
input_labels: (List of) labels for the input axes.
input_units: (List of) units for the input axes.
name: Name of the generated data set.
fill_axes: If the dimension of `output_data` is higher than the
length of the given `input_data` list, dummy entries will be
appended until the required dimension is reached.
enable_extrapolation (bool): If True, internal interpolators will allow
            extrapolation. Otherwise, the last given value will be repeated for
1D cases and the result will be padded with zeros for cases > 1D.
fill_value: If invalid data is encountered, it will be replaced with
this value before interpolation is performed.
Examples:
When instantiating 1d EvalData objects, the list can be omitted
>>> axis = Domain((0, 10), 5)
>>> data = np.random.rand(5,)
>>> e_1d = EvalData(axis, data)
For other cases, input_data has to be a list
>>> axis1 = Domain((0, 0.5), 5)
>>> axis2 = Domain((0, 1), 11)
>>> data = np.random.rand(5, 11)
>>> e_2d = EvalData([axis1, axis2], data)
Adding two Instances (if the boundaries fit, the data will be
interpolated on the more coarse grid.) Same goes for subtraction and
multiplication.
>>> e_1 = EvalData(Domain((0, 10), 5), np.random.rand(5,))
>>> e_2 = EvalData(Domain((0, 10), 10), 100*np.random.rand(5,))
>>> e_3 = e_1 + e_2
>>> e_3.output_data.shape
(5,)
Interpolate in the output data by calling the object
        >>> e_4 = EvalData(np.array(range(5)), 2*np.array(range(5)))
>>> e_4.output_data
array([0, 2, 4, 6, 8])
>>> e_5 = e_4([2, 5])
>>> e_5.output_data
array([4, 8])
>>> e_5.output_data.size
2
one may also give a slice
>>> e_6 = e_4(slice(1, 5, 2))
>>> e_6.output_data
array([2., 6.])
        >>> e_6.output_data.size
2
For multi-dimensional interpolation a list has to be provided
        >>> e_7 = e_2d([[.1, .5], [.3, .4, .7]])
>>> e_7.output_data.shape
(2, 3)
"""
def __init__(self, input_data, output_data,
input_labels=None, input_units=None,
enable_extrapolation=False,
fill_axes=False, fill_value=None,
name=None):
# check type and dimensions
if isinstance(input_data, np.ndarray) and input_data.ndim == 1:
# accept single array for single dimensional input
input_data = [input_data]
elif isinstance(input_data, Domain) and input_data.points.ndim == 1:
            # same goes for Domain instances
input_data = [input_data]
else:
assert isinstance(input_data, list)
# convert numpy arrays to domains
input_data = [Domain(points=entry)
if isinstance(entry, np.ndarray) else entry
for entry in input_data]
# if a list with names is provided, the dimension must fit
if input_labels is None:
input_labels = ["" for i in range(len(input_data))]
if not isinstance(input_labels, list):
input_labels = [input_labels]
assert len(input_labels) == len(input_data)
# if a list with units is provided, the dimension must fit
if input_units is None:
input_units = ["" for i in range(len(input_data))]
if not isinstance(input_units, list):
input_units = [input_units]
assert len(input_units) == len(input_data)
assert isinstance(output_data, np.ndarray)
if output_data.size == 0:
raise ValueError("No initialisation possible with an empty array!")
if fill_axes:
# add dummy axes to input_data for missing output dimensions
dim_diff = output_data.ndim - len(input_data)
for dim in range(dim_diff):
input_data.append(Domain(points=np.array(
range(output_data.shape[-(dim_diff - dim)]))))
input_labels.append("")
input_units.append("")
# output_data has to contain len(input_data) dimensions
assert len(input_data) == output_data.ndim
for dim in range(len(input_data)):
assert len(input_data[dim]) == output_data.shape[dim]
self.input_data = input_data
self.output_data = output_data
self.min = np.nanmin(output_data)
self.max = np.nanmax(output_data)
if len(input_data) == 1:
if enable_extrapolation:
fill_val = "extrapolate"
else:
fill_val = (output_data[0], output_data[-1])
self._interpolator = interp1d(
input_data[0],
np.ma.fix_invalid(output_data, fill_value=fill_value),
axis=-1,
bounds_error=False,
fill_value=fill_val)
elif len(input_data) == 2 and output_data.ndim == 2:
# pure 2d case
if enable_extrapolation:
                raise ValueError("Extrapolation not supported for 2d data. See "
                                 "https://github.com/scipy/scipy/issues/8099 "
                                 "for details.")
            if len(input_data[0]) > 3 and len(input_data[1]) > 3 and False:
                # special treatment for very common case (faster than interp2d);
                # note that this fast path is currently disabled by the trailing
                # `and False`; boundary values are used as fill values
self._interpolator = RectBivariateSpline(
*input_data,
np.ma.fix_invalid(output_data, fill_value=fill_value)
)
else:
# this will trigger nearest neighbour interpolation
fill_val = None
# if enable_extrapolation:
# fill_val = None
# else:
# Since the value has to be the same at every border
# fill_val = 0
self._interpolator = interp2d(
input_data[0],
input_data[1],
np.ma.fix_invalid(output_data.T, fill_value=fill_value),
bounds_error=False,
fill_value=fill_val)
else:
if enable_extrapolation:
fill_val = None
else:
# Since the value has to be the same at every border
fill_val = 0
self._interpolator = RegularGridInterpolator(
input_data,
np.ma.fix_invalid(output_data, fill_value=fill_value),
bounds_error=False,
fill_value=fill_val)
# handle names and units
self.input_labels = input_labels
self.input_units = input_units
self.name = name
if self.name is None:
self.name = ""
def adjust_input_vectors(self, other):
"""
        Check the input vectors of `self` and `other` for compatibility
        (equivalence) and harmonize them if they are compatible.
        The compatibility check is performed for every input vector in
        particular and examines whether they share the same boundaries.
If the amount of discretization steps between the two instances differs,
the more precise discretization is interpolated down onto the less
precise one.
Args:
other (:py:class:`.EvalData`): Other EvalData class.
Returns:
tuple:
- (list) - New common input vectors.
- (numpy.ndarray) - Interpolated self output_data array.
- (numpy.ndarray) - Interpolated other output_data array.
"""
assert len(self.input_data) == len(other.input_data)
if self.input_data == other.input_data:
return self.input_data, self.output_data, other.output_data
input_data = []
for idx in range(len(self.input_data)):
            # check whether both axes share the same boundaries
if self.input_data[idx].bounds != other.input_data[idx].bounds:
raise ValueError("Boundaries of input vector {0} don't match."
" {1} (self) != {2} (other)".format(
idx,
self.input_data[idx].bounds,
other.input_data[idx].bounds
))
# check which axis has the worst discretization
if len(self.input_data[idx]) <= len(other.input_data[idx]):
input_data.append(self.input_data[idx])
else:
input_data.append(other.input_data[idx])
# interpolate data
interpolated_self = self.interpolate(input_data)
interpolated_other = other.interpolate(input_data)
return (input_data,
interpolated_self.output_data,
interpolated_other.output_data)
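    # Illustrative use of adjust_input_vectors (values are made up):
    #   a = EvalData(Domain((0, 1), 11), np.random.rand(11))
    #   b = EvalData(Domain((0, 1), 21), np.random.rand(21))
    #   grid, a_out, b_out = a.adjust_input_vectors(b)
    #   len(grid[0]) == 11   # data is interpolated onto the coarser axis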
def add(self, other, from_left=True):
"""
Perform the element-wise addition of the output_data arrays from `self`
        and `other`.
        This method is used to support addition by implementing
        __add__ (from_left=True) and __radd__ (from_left=False).
        If `other` is an :py:class:`.EvalData`, the `input_data` lists of
        `self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
        The summation operation is performed on the interpolated output_data.
If `other` is a :class:`numbers.Number` it is added according to
numpy's broadcasting rules.
Args:
other (:py:class:`numbers.Number` or :py:class:`.EvalData`): Number
or EvalData object to add to self.
from_left (bool): Perform the addition from left if True or from
right if False.
Returns:
:py:class:`.EvalData` with adapted input_data and output_data as
result of the addition.
"""
if isinstance(other, numbers.Number):
if from_left:
output_data = self.output_data + other
else:
output_data = other + self.output_data
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="{} + {}".format(self.name, other))
elif isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
# add the output arrays
if from_left:
output_data = self_output_data + other_output_data
_name = self.name + " + " + other.name
else:
output_data = other_output_data + self_output_data
_name = other.name + " + " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __radd__(self, other):
return self.add(other, from_left=False)
def __add__(self, other):
return self.add(other)
def sub(self, other, from_left=True):
"""
Perform the element-wise subtraction of the output_data arrays from
        `self` and `other`.
        This method is used to support subtraction by implementing
        __sub__ (from_left=True) and __rsub__ (from_left=False).
        If `other` is an :py:class:`.EvalData`, the `input_data` lists of
        `self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The subtraction operation is performed on the interpolated output_data.
If `other` is a :class:`numbers.Number` it is handled according to
numpy's broadcasting rules.
Args:
other (:py:class:`numbers.Number` or :py:class:`.EvalData`): Number
or EvalData object to subtract.
from_left (boolean): Perform subtraction from left if True or from
right if False.
Returns:
:py:class:`.EvalData` with adapted input_data and output_data as
result of subtraction.
"""
if isinstance(other, numbers.Number):
if from_left:
output_data = self.output_data - other
else:
output_data = other - self.output_data
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="{} - {}".format(self.name, other))
elif isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
# subtract the output arrays
if from_left:
output_data = self_output_data - other_output_data
_name = self.name + " - " + other.name
else:
output_data = other_output_data - self_output_data
_name = other.name + " - " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __rsub__(self, other):
return self.sub(other, from_left=False)
def __sub__(self, other):
return self.sub(other)
def mul(self, other, from_left=True):
"""
Perform the element-wise multiplication of the output_data arrays from
        `self` and `other`.
        This method is used to support multiplication by implementing
        __mul__ (from_left=True) and __rmul__ (from_left=False).
        If `other` is an :py:class:`.EvalData`, the `input_data` lists of
        `self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The multiplication operation is performed on the interpolated
output_data. If `other` is a :class:`numbers.Number` it is handled
according to numpy's broadcasting rules.
Args:
other (:class:`numbers.Number` or :py:class:`.EvalData`): Factor
to multiply with.
            from_left (bool): Multiplication from left if True or from right
if False.
Returns:
:py:class:`.EvalData` with adapted input_data and output_data as
result of multiplication.
"""
if isinstance(other, numbers.Number):
if from_left:
output_data = self.output_data * other
else:
output_data = other * self.output_data
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
                            name="{} * {}".format(self.name, other))
elif isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
            # multiply the interpolated output arrays
output_data = other_output_data * self_output_data
if from_left:
_name = self.name + " * " + other.name
else:
_name = other.name + " * " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __rmul__(self, other):
return self.mul(other, from_left=False)
def __mul__(self, other):
return self.mul(other)
def matmul(self, other, from_left=True):
"""
Perform the matrix multiplication of the output_data arrays from
        `self` and `other`.
        This method is used to support matrix multiplication (@) by implementing
        __matmul__ (from_left=True) and __rmatmul__ (from_left=False).
        If `other` is an :py:class:`.EvalData`, the `input_data` lists of
        `self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The matrix multiplication operation is performed on the interpolated
output_data.
If `other` is a :class:`numbers.Number` it is handled according to
numpy's broadcasting rules.
Args:
other (:py:class:`EvalData`): Object to multiply with.
from_left (boolean): Matrix multiplication from left if True or
from right if False.
Returns:
:py:class:`EvalData` with adapted input_data and output_data as
result of matrix multiplication.
"""
if isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
if self.output_data.shape != other.output_data.shape:
raise ValueError("Dimension mismatch")
if from_left:
output_data = self_output_data @ other_output_data
_name = self.name + " @ " + other.name
else:
output_data = other_output_data @ self_output_data
_name = other.name + " @ " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __rmatmul__(self, other):
return self.matmul(other, from_left=False)
def __matmul__(self, other):
return self.matmul(other)
def __pow__(self, power):
"""
        Raise the elements from `self.output_data` element-wise to `power`.
Args:
power (:class:`numbers.Number`): Power to raise to.
Returns:
:py:class:`EvalData` with self.input_data and output_data as results
of the raise operation.
"""
if isinstance(power, numbers.Number):
output_data = self.output_data ** power
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="{} ** {}".format(self.name, power))
else:
return NotImplemented
def sqrt(self):
"""
        Take the element-wise square root of `self.output_data`.
Return:
:py:class:`EvalData` with self.input_data and output_data as result
of root calculation.
"""
output_data = np.sqrt(self.output_data)
ed = EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="sqrt({})".format(self.name))
return ed
def abs(self):
"""
        Get the absolute value of the elements from `self.output_data`.
Return:
:py:class:`EvalData` with self.input_data and output_data as result
of absolute value calculation.
"""
output_data = np.abs(self.output_data)
ed = EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="abs({})".format(self.name))
return ed
def __call__(self, interp_axes, as_eval_data=True):
"""
Interpolation method for output_data.
        Determines whether a one-, two- or three-dimensional interpolation is used.
        The method can handle slice objects in the position lists.
        One slice object is allowed per axis list.
Args:
interp_axes (list(list)): Axis positions in the form
- 1D: [axis] with axis=[1,2,3]
- 2D: [axis1, axis2] with axis1=[1,2,3] and axis2=[0,1,2,3,4]
as_eval_data (bool): Return the interpolation result as EvalData
object. If `False`, the output_data array of the results is
returned.
Returns:
            :py:class:`EvalData` with `interp_axes` as input_data and the
            output_data interpolated at these positions.
"""
if len(self.input_data) == 1:
# special case for 1d data where the outermost list can be omitted
if isinstance(interp_axes, slice):
interp_axes = [interp_axes]
if isinstance(interp_axes, list) and \
all([isinstance(e, Number) for e in interp_axes]):
interp_axes = [interp_axes]
assert isinstance(interp_axes, list)
dim_err = len(self.input_data) - len(interp_axes)
assert dim_err >= 0
interp_axes += [slice(None) for x in range(dim_err)]
assert len(interp_axes) == len(self.input_data)
_list = []
for i, interp_points in enumerate(interp_axes):
if isinstance(interp_points, slice):
_entry = self.input_data[i][interp_points]
if _entry is None:
raise ValueError("Quantity resulting from slice is empty!")
else:
try:
_entry = list(interp_points)
except TypeError as e:
raise ValueError("Coordinates must be given as iterable!")
_list.append(_entry)
res = self.interpolate(_list)
if as_eval_data:
return res
else:
return res.output_data
def interpolate(self, interp_axis):
"""
Main interpolation method for output_data.
If one of the output dimensions is to be interpolated at one single
point, the dimension of the output will decrease by one.
Args:
interp_axis (list(list)): axis positions in the form
- 1D: axis with axis=[1,2,3]
- 2D: [axis1, axis2] with axis1=[1,2,3] and axis2=[0,1,2,3,4]
Returns:
:py:class:`EvalData` with `interp_axis` as new input_data and
interpolated output_data.
"""
assert isinstance(interp_axis, list)
assert len(interp_axis) == len(self.input_data)
# check if an axis has been degenerated
domains = [Domain(points=axis) for axis in interp_axis if len(axis) > 1]
if len(self.input_data) == 1:
interpolated_output = self._interpolator(interp_axis[0])
elif len(self.input_data) == 2:
interpolated_output = self._interpolator(*interp_axis)
if isinstance(self._interpolator, interp2d):
interpolated_output = interpolated_output.T
else:
dims = tuple(len(a) for a in interp_axis)
coords = np.array(
[a.flatten() for a in np.meshgrid(*interp_axis, indexing="ij")])
interpolated_output = self._interpolator(coords.T).reshape(dims)
out_arr = ma.masked_invalid(interpolated_output).squeeze()
return EvalData(input_data=domains,
output_data=out_arr,
name=self.name)
|
cklb/pyinduct
|
pyinduct/core.py
|
Python
|
gpl-3.0
| 100,888 | 0.000545 |
#!/usr/bin/env python
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Helper module
#
# =============================================================================
import argparse, sys
from confluent_kafka import avro, KafkaError
from confluent_kafka.admin import AdminClient, NewTopic
from uuid import uuid4
#import certifi
name_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Name",
"type": "record",
"fields": [
{"name": "name", "type": "string"}
]
}
"""
class Name(object):
"""
Name stores the deserialized Avro record for the Kafka key.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["name", "id"]
def __init__(self, name=None):
self.name = name
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_name(obj, ctx):
return Name(obj['name'])
@staticmethod
def name_to_dict(name, ctx):
return Name.to_dict(name)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(name=self.name)
# Schema used for serializing Count class, passed in as the Kafka value
count_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Count",
"type": "record",
"fields": [
{"name": "count", "type": "int"}
]
}
"""
class Count(object):
"""
Count stores the deserialized Avro record for the Kafka value.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["count", "id"]
def __init__(self, count=None):
self.count = count
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_count(obj, ctx):
return Count(obj['count'])
@staticmethod
def count_to_dict(count, ctx):
return Count.to_dict(count)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(count=self.count)
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="Confluent Python Client example to produce messages \
to Confluent Cloud")
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-f',
dest="config_file",
help="path to Confluent Cloud configuration file",
required=True)
required.add_argument('-t',
dest="topic",
help="topic name",
required=True)
args = parser.parse_args()
return args
def read_ccloud_config(config_file):
"""Read Confluent Cloud configuration for librdkafka clients"""
conf = {}
with open(config_file) as fh:
for line in fh:
line = line.strip()
if len(line) != 0 and line[0] != "#":
parameter, value = line.strip().split('=', 1)
conf[parameter] = value.strip()
#conf['ssl.ca.location'] = certifi.where()
return conf
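# The configuration file is expected to contain plain "key=value" lines.
# Minimal illustrative example (placeholder values; keys follow librdkafka naming):
#   bootstrap.servers=<broker-endpoint>:9092
#   security.protocol=SASL_SSL
#   sasl.mechanisms=PLAIN
#   sasl.username=<api-key>
#   sasl.password=<api-secret>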
def pop_schema_registry_params_from_config(conf):
"""Remove potential Schema Registry related configurations from dictionary"""
conf.pop('schema.registry.url', None)
conf.pop('basic.auth.user.info', None)
conf.pop('basic.auth.credentials.source', None)
return conf
def create_topic(conf, topic):
"""
Create a topic if needed
Examples of additional admin API functionality:
https://github.com/confluentinc/confluent-kafka-python/blob/master/examples/adminapi.py
"""
admin_client_conf = pop_schema_registry_params_from_config(conf.copy())
a = AdminClient(admin_client_conf)
fs = a.create_topics([NewTopic(
topic,
num_partitions=1,
replication_factor=3
)])
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} created".format(topic))
except Exception as e:
# Continue if error code TOPIC_ALREADY_EXISTS, which may be true
# Otherwise fail fast
if e.args[0].code() != KafkaError.TOPIC_ALREADY_EXISTS:
print("Failed to create topic {}: {}".format(topic, e))
sys.exit(1)
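# Illustrative wiring of the helpers above (assumes a valid config file path):
#   args = parse_args()
#   conf = read_ccloud_config(args.config_file)
#   create_topic(conf, args.topic)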
|
confluentinc/examples
|
clients/cloud/python/ccloud_lib.py
|
Python
|
apache-2.0
| 5,500 | 0.001273 |
from phapi import ProductHuntApi
import settings
import json
pha = ProductHuntApi(settings.DEVELOPER_TOKEN)
posts = pha.get_posts()
print json.dumps(posts, indent=2)
|
dangoldin/pyproducthunt
|
analyze.py
|
Python
|
mit
| 168 | 0.005952 |
import subprocess
from distutils import spawn
brctlexe = spawn.find_executable("brctl")
ipexe = spawn.find_executable("ip")
class BridgeException(Exception):
pass
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
    def __repr__(self):
        """ Return a representation of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
    def hairpin(self, port, val=True):
        """ Turn hairpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
class BridgeController(object):
def addbr(self, name):
""" Create a bridge and set the device up. """
_runshell([brctlexe, 'addbr', name],
"Could not create bridge %s." % name)
_runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
"Could not set link up for %s." % name)
return Bridge(name)
def delbr(self, name):
""" Set the device down and delete the bridge. """
self.getbr(name) # Check if exists
_runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
"Could not set link down for %s." % name)
_runshell([brctlexe, 'delbr', name],
"Could not delete bridge %s." % name)
def showall(self):
""" Return a list of all available bridges. """
p = _runshell([brctlexe, 'show'],
"Could not show bridges.")
wlist = map(str.split, p.stdout.read().splitlines()[1:])
brwlist = filter(lambda x: len(x) != 1, wlist)
brlist = map(lambda x: x[0], brwlist)
return map(Bridge, brlist)
def getbr(self, name):
""" Return a bridge object."""
for br in self.showall():
if br.name == name:
return br
raise BridgeException("Bridge does not exist.")
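# Illustrative usage (requires the brctl and ip executables and root privileges):
#   ctl = BridgeController()
#   br = ctl.addbr("br-test")
#   br.addif("eth0")
#   br.stp(True)
#   ctl.delbr("br-test")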
def _runshell(cmd, exception):
""" Run a shell command. if fails, raise a proper exception. """
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait() != 0:
raise BridgeException(exception)
return p
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Python
|
gpl-2.0
| 5,254 | 0.007423 |
'''Test for ratio of Poisson intensities in two independent samples
Author: Josef Perktold
License: BSD-3
'''
import numpy as np
import warnings
from scipy import stats
from statsmodels.stats.base import HolderTuple
from statsmodels.stats.weightstats import _zstat_generic2
def test_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1,
method='score', alternative='two-sided',
etest_kwds=None):
'''test for ratio of two sample Poisson intensities
If the two Poisson rates are g1 and g2, then the Null hypothesis is
- H0: g1 / g2 = ratio_null
against one of the following alternatives
- H1_2-sided: g1 / g2 != ratio_null
- H1_larger: g1 / g2 > ratio_null
- H1_smaller: g1 / g2 < ratio_null
Parameters
----------
count1 : int
Number of events in first sample.
exposure1 : float
Total exposure (time * subjects) in first sample.
count2 : int
Number of events in second sample.
exposure2 : float
Total exposure (time * subjects) in second sample.
    ratio_null : float
ratio of the two Poisson rates under the Null hypothesis. Default is 1.
method : string
Method for the test statistic and the p-value. Defaults to `'score'`.
Current Methods are based on Gu et. al 2008.
Implemented are 'wald', 'score' and 'sqrt' based asymptotic normal
distribution, and the exact conditional test 'exact-cond', and its
mid-point version 'cond-midp'. method='etest' and method='etest-wald'
provide pvalues from `etest_poisson_2indep` using score or wald
statistic respectively.
see Notes.
alternative : string
The alternative hypothesis, H1, has to be one of the following
- 'two-sided': H1: ratio of rates is not equal to ratio_null (default)
- 'larger' : H1: ratio of rates is larger than ratio_null
- 'smaller' : H1: ratio of rates is smaller than ratio_null
etest_kwds: dictionary
Additional parameters to be passed to the etest_poisson_2indep
function, namely y_grid.
Returns
-------
results : instance of HolderTuple class
The two main attributes are test statistic `statistic` and p-value
`pvalue`.
Notes
-----
- 'wald': method W1A, wald test, variance based on separate estimates
- 'score': method W2A, score test, variance based on estimate under Null
- 'wald-log': W3A
- 'score-log' W4A
- 'sqrt': W5A, based on variance stabilizing square root transformation
- 'exact-cond': exact conditional test based on binomial distribution
- 'cond-midp': midpoint-pvalue of exact conditional test
- 'etest': etest with score test statistic
- 'etest-wald': etest with wald test statistic
References
----------
Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,
Biometrical Journal 50 (2008) 2, 2008
See Also
--------
tost_poisson_2indep
etest_poisson_2indep
'''
# shortcut names
y1, n1, y2, n2 = count1, exposure1, count2, exposure2
d = n2 / n1
r = ratio_null
r_d = r / d
if method in ['score']:
stat = (y1 - y2 * r_d) / np.sqrt((y1 + y2) * r_d)
dist = 'normal'
elif method in ['wald']:
stat = (y1 - y2 * r_d) / np.sqrt(y1 + y2 * r_d**2)
dist = 'normal'
elif method in ['sqrt']:
stat = 2 * (np.sqrt(y1 + 3 / 8.) - np.sqrt((y2 + 3 / 8.) * r_d))
stat /= np.sqrt(1 + r_d)
dist = 'normal'
elif method in ['exact-cond', 'cond-midp']:
from statsmodels.stats import proportion
bp = r_d / (1 + r_d)
y_total = y1 + y2
stat = None
# TODO: why y2 in here and not y1, check definition of H1 "larger"
pvalue = proportion.binom_test(y1, y_total, prop=bp,
alternative=alternative)
if method in ['cond-midp']:
# not inplace in case we still want binom pvalue
pvalue = pvalue - 0.5 * stats.binom.pmf(y1, y_total, bp)
dist = 'binomial'
elif method.startswith('etest'):
if method.endswith('wald'):
method_etest = 'wald'
else:
method_etest = 'score'
if etest_kwds is None:
etest_kwds = {}
stat, pvalue = etest_poisson_2indep(
count1, exposure1, count2, exposure2, ratio_null=ratio_null,
method=method_etest, alternative=alternative, **etest_kwds)
dist = 'poisson'
else:
raise ValueError('method not recognized')
if dist == 'normal':
stat, pvalue = _zstat_generic2(stat, 1, alternative)
rates = (y1 / n1, y2 / n2)
ratio = rates[0] / rates[1]
res = HolderTuple(statistic=stat,
pvalue=pvalue,
distribution=dist,
method=method,
alternative=alternative,
rates=rates,
ratio=ratio,
ratio_null=ratio_null)
return res
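# Illustrative call (counts and exposures are made-up numbers):
#   res = test_poisson_2indep(60, 514.8, 30, 543.1, method='score')
#   res.statistic, res.pvalue   # attributes of the returned HolderTuple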
def etest_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1,
method='score', alternative='2-sided', ygrid=None,
y_grid=None):
"""E-test for ratio of two sample Poisson rates
If the two Poisson rates are g1 and g2, then the Null hypothesis is
- H0: g1 / g2 = ratio_null
against one of the following alternatives
- H1_2-sided: g1 / g2 != ratio_null
- H1_larger: g1 / g2 > ratio_null
- H1_smaller: g1 / g2 < ratio_null
Parameters
----------
count1 : int
Number of events in first sample
exposure1 : float
Total exposure (time * subjects) in first sample
    count2 : int
        Number of events in second sample
    exposure2 : float
        Total exposure (time * subjects) in second sample
    ratio_null : float
ratio of the two Poisson rates under the Null hypothesis. Default is 1.
method : {"score", "wald"}
Method for the test statistic that defines the rejection region.
alternative : string
The alternative hypothesis, H1, has to be one of the following
'two-sided': H1: ratio of rates is not equal to ratio_null (default)
'larger' : H1: ratio of rates is larger than ratio_null
'smaller' : H1: ratio of rates is smaller than ratio_null
y_grid : None or 1-D ndarray
Grid values for counts of the Poisson distribution used for computing
the pvalue. By default truncation is based on an upper tail Poisson
quantiles.
ygrid : None or 1-D ndarray
Same as y_grid. Deprecated. If both y_grid and ygrid are provided,
ygrid will be ignored.
Returns
-------
stat_sample : float
test statistic for the sample
pvalue : float
References
----------
Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,
Biometrical Journal 50 (2008) 2, 2008
"""
y1, n1, y2, n2 = count1, exposure1, count2, exposure2
d = n2 / n1
r = ratio_null
r_d = r / d
eps = 1e-20 # avoid zero division in stat_func
if method in ['score']:
def stat_func(x1, x2):
return (x1 - x2 * r_d) / np.sqrt((x1 + x2) * r_d + eps)
# TODO: do I need these? return_results ?
# rate2_cmle = (y1 + y2) / n2 / (1 + r_d)
# rate1_cmle = rate2_cmle * r
# rate1 = rate1_cmle
# rate2 = rate2_cmle
elif method in ['wald']:
def stat_func(x1, x2):
return (x1 - x2 * r_d) / np.sqrt(x1 + x2 * r_d**2 + eps)
# rate2_mle = y2 / n2
# rate1_mle = y1 / n1
# rate1 = rate1_mle
# rate2 = rate2_mle
else:
raise ValueError('method not recognized')
    # The sampling distribution needs to be based on the null hypothesis
# use constrained MLE from 'score' calculation
rate2_cmle = (y1 + y2) / n2 / (1 + r_d)
rate1_cmle = rate2_cmle * r
rate1 = rate1_cmle
rate2 = rate2_cmle
mean1 = n1 * rate1
mean2 = n2 * rate2
stat_sample = stat_func(y1, y2)
if ygrid is not None:
warnings.warn("ygrid is deprecated, use y_grid", DeprecationWarning)
y_grid = y_grid if y_grid is not None else ygrid
# The following uses a fixed truncation for evaluating the probabilities
# It will currently only work for small counts, so that sf at truncation
# point is small
# We can make it depend on the amount of truncated sf.
# Some numerical optimization or checks for large means need to be added.
if y_grid is None:
threshold = stats.poisson.isf(1e-13, max(mean1, mean2))
threshold = max(threshold, 100) # keep at least 100
y_grid = np.arange(threshold + 1)
else:
y_grid = np.asarray(y_grid)
if y_grid.ndim != 1:
raise ValueError("y_grid needs to be None or 1-dimensional array")
pdf1 = stats.poisson.pmf(y_grid, mean1)
pdf2 = stats.poisson.pmf(y_grid, mean2)
stat_space = stat_func(y_grid[:, None], y_grid[None, :]) # broadcasting
eps = 1e-15 # correction for strict inequality check
if alternative in ['two-sided', '2-sided', '2s']:
mask = np.abs(stat_space) >= np.abs(stat_sample) - eps
elif alternative in ['larger', 'l']:
mask = stat_space >= stat_sample - eps
elif alternative in ['smaller', 's']:
mask = stat_space <= stat_sample + eps
else:
raise ValueError('invalid alternative')
pvalue = ((pdf1[:, None] * pdf2[None, :])[mask]).sum()
return stat_sample, pvalue
def tost_poisson_2indep(count1, exposure1, count2, exposure2, low, upp,
method='score'):
    '''Equivalence test based on two one-sided `test_poisson_2indep` tests
    This assumes that we have two independent Poisson samples.
The Null and alternative hypothesis for equivalence testing are
- H0: g1 / g2 <= low or upp <= g1 / g2
- H1: low < g1 / g2 < upp
where g1 and g2 are the Poisson rates.
Parameters
----------
count1 : int
Number of events in first sample
exposure1 : float
Total exposure (time * subjects) in first sample
count2 : int
Number of events in second sample
exposure2 : float
Total exposure (time * subjects) in second sample
    low, upp : float
        Equivalence margins for the ratio of Poisson rates.
method: string
Method for the test statistic and the p-value. Defaults to `'score'`.
Current Methods are based on Gu et. al 2008
Implemented are 'wald', 'score' and 'sqrt' based asymptotic normal
distribution, and the exact conditional test 'exact-cond', and its
mid-point version 'cond-midp', see Notes
Returns
-------
results : instance of HolderTuple class
The two main attributes are test statistic `statistic` and p-value
`pvalue`.
Notes
-----
- 'wald': method W1A, wald test, variance based on separate estimates
- 'score': method W2A, score test, variance based on estimate under Null
- 'wald-log': W3A not implemented
- 'score-log' W4A not implemented
- 'sqrt': W5A, based on variance stabilizing square root transformation
- 'exact-cond': exact conditional test based on binomial distribution
- 'cond-midp': midpoint-pvalue of exact conditional test
The latter two are only verified for one-sided example.
References
----------
Gu, Ng, Tang, Schucany 2008: Testing the Ratio of Two Poisson Rates,
Biometrical Journal 50 (2008) 2, 2008
See Also
--------
test_poisson_2indep
'''
tt1 = test_poisson_2indep(count1, exposure1, count2, exposure2,
ratio_null=low, method=method,
alternative='larger')
tt2 = test_poisson_2indep(count1, exposure1, count2, exposure2,
ratio_null=upp, method=method,
alternative='smaller')
    # report the more conservative (larger) of the two one-sided p-values
    idx_max = 1 if tt1.pvalue < tt2.pvalue else 0
res = HolderTuple(statistic=[tt1.statistic, tt2.statistic][idx_max],
pvalue=[tt1.pvalue, tt2.pvalue][idx_max],
method=method,
results_larger=tt1,
results_smaller=tt2,
title="Equivalence test for 2 independent Poisson rates"
)
return res
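# Illustrative equivalence test (counts, exposures and margins are made up):
#   res = tost_poisson_2indep(60, 514.8, 30, 543.1, low=0.8, upp=1.25)
#   res.pvalue   # small values support equivalence within (low, upp)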
|
statsmodels/statsmodels
|
statsmodels/stats/rates.py
|
Python
|
bsd-3-clause
| 12,513 | 0 |
import os
from flask import Flask, render_template_string, request
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask_user import roles_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///single_file_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'email@example.com')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <noreply@example.com>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
mail = Mail(app) # Initialize Flask-Mail
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
# Define the User data model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
user_auth = db.relationship('UserAuth', uselist=False)
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define the UserAuth data model.
class UserAuth(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
# User authentication information
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
user = db.relationship('User', uselist=False)
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
    # Create all the database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserAuthClass=UserAuth)
user_manager = UserManager(db_adapter, app)
# Create 'user007' user with 'secret' and 'agent' roles
if not UserAuth.query.filter(UserAuth.username=='user007').first():
user1 = User(email='user007@example.com', first_name='James', last_name='Bond', active=True)
db.session.add(user1)
user_auth1 = UserAuth(user=user1, username='user007',
password=user_manager.hash_password('Password1')
)
db.session.add(user_auth1)
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.commit()
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def members_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
    # The Special page requires a user with 'secret' and 'sauce' roles or with 'secret' and 'agent' roles
    # (positional role arguments are ANDed; a list is satisfied by any one of its roles).
@app.route('/special')
@roles_required('secret', ['sauce', 'agent']) # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Special Page</h2>
<p>This page can only be accessed by user007.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
|
jamescarignan/Flask-User
|
example_apps/user_auth_app.py
|
Python
|
bsd-2-clause
| 6,986 | 0.006298 |
class SkipList:
def __init__(self):
self.head = None
|
robin1885/algorithms-exercises-using-python
|
source-code-from-author-book/Listings-for-Second-Edition/listing_8_14.py
|
Python
|
mit
| 65 | 0 |
# Replace all key events in
# js files and html files
# with our standard key input event.
# For more details, see the DOC dir.
# Key events are replaced globally and handled in a unified way.
|
lifeinoppo/littlefishlet-scode
|
SRC/Server/Components/input/python/keyInput.py
|
Python
|
gpl-2.0
| 175 | 0.034014 |
from contextlib import contextmanager
import sys
from . import controller
from .utils import (CursorPosition, TextQuery)
if sys.platform.startswith("win"):
from . import ia2
os_controller_class = ia2.Controller
else:
# TODO Support Linux.
pass
controller_instance = None
def get_accessibility_controller():
"""Get the OS-independent accessibility controller which is the gateway to all
accessibility functionality."""
global controller_instance
if not controller_instance or controller_instance.stopped:
os_controller = os_controller_class()
controller_instance = controller.AccessibilityController(os_controller)
return controller_instance
@contextmanager
def get_stopping_accessibility_controller():
"""Same as :func:`get_accessibility_controller`, but automatically stops when
used in a `with` context."""
yield get_accessibility_controller()
controller_instance.stop()
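# Illustrative usage (currently Windows-only, see the TODO above):
#   with get_stopping_accessibility_controller() as controller:
#       pass  # interact with the accessibility controller here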
|
tylercal/dragonfly
|
dragonfly/accessibility/__init__.py
|
Python
|
lgpl-3.0
| 949 | 0.004215 |
#!/usr/bin/env python
# http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
import re
import unicodedata
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
def text_to_id(text):
"""
Convert input text to id.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
text = strip_accents(text.lower())
text = re.sub('[ ]+', '_', text)
text = re.sub('[^0-9a-zA-Z_-]', '', text)
return text
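# Illustrative examples:
#   strip_accents("Málaga")       -> "Malaga"
#   text_to_id("Méchant Robot!")  -> "mechant_robot"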
|
oh6hay/refworks-bibtex-postprocess
|
textutil.py
|
Python
|
mit
| 943 | 0.004242 |
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import operator
import nova.scheduler
from nova.scheduler.filters import abstract_filter
class JsonFilter(abstract_filter.AbstractHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = not args[0] in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms"""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def instance_type_to_filter(self, instance_type):
"""Convert instance_type into JSON filter object."""
required_ram = instance_type['memory_mb']
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
['>=', '$compute.disk_available', required_disk]]
return json.dumps(query)
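    # For a flavor with 512 MB of RAM and 10 GB of local disk the method above
    # produces (illustrative values):
    #   ["and", [">=", "$compute.host_memory_free", 512],
    #           [">=", "$compute.disk_available", 10]]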
def _parse_string(self, string, host, hostinfo):
"""Strings prefixed with $ are capability lookups in the
form '$service.capability[.subcap*]'.
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
services = dict(compute=hostinfo.compute, network=hostinfo.network,
volume=hostinfo.volume)
service = services.get(path[0], None)
if not service:
return None
for item in path[1:]:
service = service.get(item, None)
if not service:
return None
return service
def _process_filter(self, query, host, hostinfo):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host, hostinfo)
elif isinstance(arg, basestring):
arg = self._parse_string(arg, host, hostinfo)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def filter_hosts(self, host_list, query, options):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
expanded = json.loads(query)
filtered_hosts = []
for host, hostinfo in host_list:
if not hostinfo:
continue
if hostinfo.compute and not hostinfo.compute.get("enabled", True):
# Host is disabled
continue
result = self._process_filter(expanded, host, hostinfo)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
filtered_hosts.append((host, hostinfo))
return filtered_hosts
|
salv-orlando/MyRepo
|
nova/scheduler/filters/json_filter.py
|
Python
|
apache-2.0
| 5,243 | 0.000572 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Test misc module."""
import os
import shutil
from unittest import mock
import pytest
from ..misc import pass_dummy_scans, check_valid_fs_license
@pytest.mark.parametrize(
"algo_dummy_scans,dummy_scans,expected_out", [(2, 1, 1), (2, None, 2), (2, 0, 0)]
)
def test_pass_dummy_scans(algo_dummy_scans, dummy_scans, expected_out):
"""Check dummy scans passing."""
skip_vols = pass_dummy_scans(algo_dummy_scans, dummy_scans)
assert skip_vols == expected_out
@pytest.mark.parametrize(
"stdout,rc,valid",
[
(b"Successful command", 0, True),
(b"", 0, True),
(b"ERROR: FreeSurfer license file /made/up/license.txt not found", 1, False),
(b"Failed output", 1, False),
(b"ERROR: Systems running GNU glibc version greater than 2.15", 0, False),
],
)
def test_fs_license_check(stdout, rc, valid):
with mock.patch("subprocess.run") as mocked_run:
mocked_run.return_value.stdout = stdout
mocked_run.return_value.returncode = rc
assert check_valid_fs_license() is valid
@pytest.mark.skipif(not os.getenv("FS_LICENSE"), reason="No FS license found")
def test_fs_license_check2(monkeypatch):
"""Execute the canary itself."""
assert check_valid_fs_license() is True
@pytest.mark.skipif(shutil.which('mri_convert') is None, reason="FreeSurfer not installed")
def test_fs_license_check3(monkeypatch):
with monkeypatch.context() as m:
m.delenv("FS_LICENSE", raising=False)
m.delenv("FREESURFER_HOME", raising=False)
assert check_valid_fs_license() is False
|
oesteban/niworkflows
|
niworkflows/utils/tests/test_misc.py
|
Python
|
bsd-3-clause
| 2,459 | 0.001627 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
from ansible import constants as C
__all__ = ["CallbackBase"]
class CallbackBase:
'''
This is a base ansible callback class that does nothing. New callbacks should
use this class as a base and override any callback methods they wish to execute
custom actions.
'''
# FIXME: the list of functions here needs to be updated once we have
# finalized the list of callback methods used in the default callback
def __init__(self, display):
self._display = display
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'with no defined name')
            ctype = getattr(self, 'CALLBACK_TYPE', 'unknown')
            version = getattr(self, 'CALLBACK_VERSION', 'unknown')
self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))
def _dump_results(self, result, indent=4, sort_keys=True):
return json.dumps(result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def _handle_warnings(self, res):
''' display warnings, if enabled and any exist in the result '''
if C.COMMAND_WARNINGS and 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
####### V2 METHODS, by default they call v1 counterparts if possible ######
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
host = result._host.get_name()
#FIXME, get item to pass through
item = None
self.runner_on_skipped(host, item)
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_no_hosts(self, task):
self.runner_on_no_hosts()
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
#FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_runner_on_file_diff(self, result, diff):
        pass  # no v1 correspondence
def v2_playbook_on_start(self):
self.playbook_on_start()
def v2_playbook_on_notify(self, result, handler):
host = result._host.get_name()
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
        pass  # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
        pass  # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)
def v2_playbook_on_setup(self):
self.playbook_on_setup()
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
|
wfxiang08/ansible
|
lib/ansible/plugins/callback/__init__.py
|
Python
|
gpl-3.0
| 6,672 | 0.002998 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ram_file_system.h."""
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
class RamFilesystemTest(test_util.TensorFlowTestCase):
def test_create_and_delete_directory(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.delete_recursively_v2('ram://testdirectory')
def test_create_and_delete_directory_tree_recursive(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.create_dir_v2('ram://testdirectory/subdir1')
file_io.create_dir_v2('ram://testdirectory/subdir2')
file_io.create_dir_v2('ram://testdirectory/subdir1/subdir3')
with gfile.GFile('ram://testdirectory/subdir1/subdir3/a.txt', 'w') as f:
f.write('Hello, world.')
file_io.delete_recursively_v2('ram://testdirectory')
self.assertEqual(gfile.Glob('ram://testdirectory/*'), [])
def test_write_file(self):
with gfile.GFile('ram://a.txt', 'w') as f:
f.write('Hello, world.')
f.write('Hello, world.')
with gfile.GFile('ram://a.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_append_file_with_seek(self):
with gfile.GFile('ram://c.txt', 'w') as f:
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'w+') as f:
f.seek(offset=0, whence=2)
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_list_dir(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['%d.txt' % i for i in range(10)]
self.assertEqual(gfile.ListDirectory('ram://a/b/'), matches)
def test_glob(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['ram://a/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://a/b/*'), matches)
matches = []
self.assertEqual(gfile.Glob('ram://b/b/*'), matches)
matches = ['ram://c/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://c/b/*'), matches)
def test_file_exists(self):
with gfile.GFile('ram://exists/a/b/c.txt', 'w') as f:
f.write('')
self.assertTrue(gfile.Exists('ram://exists/a'))
self.assertTrue(gfile.Exists('ram://exists/a/b'))
self.assertTrue(gfile.Exists('ram://exists/a/b/c.txt'))
self.assertFalse(gfile.Exists('ram://exists/b'))
self.assertFalse(gfile.Exists('ram://exists/a/c'))
self.assertFalse(gfile.Exists('ram://exists/a/b/k'))
def test_estimator(self):
def model_fn(features, labels, mode, params):
del params
x = core_layers.dense(features, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
x = core_layers.dense(x, 100)
y = core_layers.dense(x, 1)
loss = losses.mean_squared_error(labels, y)
opt = adam.AdamOptimizer(learning_rate=0.1)
train_op = opt.minimize(
loss, global_step=training_util.get_or_create_global_step())
return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def input_fn():
batch_size = 128
return (constant_op.constant(np.random.randn(batch_size, 100),
dtype=dtypes.float32),
constant_op.constant(np.random.randn(batch_size, 1),
dtype=dtypes.float32))
config = RunConfig(
model_dir='ram://estimator-0/', save_checkpoints_steps=1)
estimator = Estimator(config=config, model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
estimator.train(input_fn=input_fn, steps=10)
def test_savedmodel(self):
class MyModule(module.Module):
@def_function.function(input_signature=[])
def foo(self):
return constant_op.constant([1])
saved_model.save(MyModule(), 'ram://my_module')
loaded = saved_model.load('ram://my_module')
self.assertAllEqual(loaded.foo(), [1])
if __name__ == '__main__':
test.main()
|
tensorflow/tensorflow
|
tensorflow/core/platform/ram_file_system_test.py
|
Python
|
apache-2.0
| 5,699 | 0.006492 |
from boto.exception import S3ResponseError, BotoServerError
from boto.s3.connection import S3Connection
from boto.ec2.autoscale import AutoScaleConnection
from boto.beanstalk import connect_to_region
from boto.s3.key import Key
from datetime import datetime
from time import time, sleep
import zipfile
import os
import subprocess
import sys
import yaml
import re
import logging
logger = None
LOGGER_NAME = 'ebs_deploy'
MAX_RED_SAMPLES = 20
def utcnow_isoformat():
return datetime.utcnow().isoformat() + 'Z'
def out(message):
"""
print alias
"""
if logger:
logger.info("%s", message)
else:
sys.stdout.write(message + "\n")
sys.stdout.flush()
def init_logging(use_logging=False):
global logger
if use_logging:
logger = logging.getLogger(LOGGER_NAME)
def configure_logging(level, handlers):
l = logging.getLogger(LOGGER_NAME)
l.setLevel(level)
for h in l.handlers[:]:
l.removeHandler(h)
for h in handlers:
l.addHandler(h)
return l
def merge_dict(dict1, dict2):
ret = dict(dict2)
for key, val in dict1.items():
val2 = dict2.get(key)
if val2 is None:
ret[key] = val
elif isinstance(val, dict) and isinstance(val2, dict):
ret[key] = merge_dict(val, val2)
elif isinstance(val, (list,)) and isinstance(val2, (list,)):
ret[key] = val + val2
else:
ret[key] = val2
return ret
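# A small sketch of merge_dict's semantics (the keys below are made up): values
# from dict2 win for scalars, nested dicts are merged recursively, and lists
# are concatenated with dict1's items first.
def _merge_dict_example():
    base = {'region': 'us-east-1', 'tags': ['a'], 'scaling': {'MinSize': 1}}
    override = {'region': 'eu-west-1', 'tags': ['b'], 'scaling': {'MaxSize': 4}}
    merged = merge_dict(base, override)
    # merged == {'region': 'eu-west-1', 'tags': ['a', 'b'],
    #            'scaling': {'MinSize': 1, 'MaxSize': 4}}
    return merged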
def get(vals, key, default_val=None):
"""
Returns a dictionary value
"""
val = vals
for part in key.split('.'):
if isinstance(val, dict):
val = val.get(part, None)
if val is None:
return default_val
else:
return default_val
return val
def parse_option_settings(option_settings):
"""
Parses option_settings as they are defined in the configuration file
"""
ret = []
for namespace, params in option_settings.items():
for key, value in params.items():
ret.append((namespace, key, value))
return ret
def override_scaling(option_settings, min_size, max_size):
""" takes the merged option_settings and injects custom min/max autoscaling sizes """
match_namespace = "aws:autoscaling:asg"
match_keys = {"MinSize": min_size, "MaxSize": max_size}
copied_option_settings = []
for (namespace, key, value) in option_settings:
new_option = (namespace, key, value)
if match_namespace == namespace and key in match_keys:
new_option = (namespace, key, match_keys[key])
copied_option_settings.append(new_option)
return copied_option_settings
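# A brief sketch of how parse_option_settings and override_scaling compose; the
# Elastic Beanstalk namespaces mirror the ones used elsewhere in this module,
# and the sizes are illustrative.
def _scaling_override_example():
    settings = parse_option_settings({
        'aws:autoscaling:asg': {'MinSize': 1, 'MaxSize': 2},
        'aws:elasticbeanstalk:environment': {'EnvironmentType': 'LoadBalanced'},
    })
    # settings is now a list of (namespace, option, value) tuples, e.g.
    # ('aws:autoscaling:asg', 'MinSize', 1)
    return override_scaling(settings, min_size=2, max_size=8)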
def parse_env_config(config, env_name):
"""
Parses an environment config
"""
all_env = get(config, 'app.all_environments', {})
env = get(config, 'app.environments.' + str(env_name), {})
return merge_dict(all_env, env)
def upload_application_archive(helper, env_config, archive=None, directory=None, version_label=None):
if version_label is None:
version_label = datetime.now().strftime('%Y%m%d_%H%M%S')
else:
# don't attempt to create an application version which already exists
existing_version_labels = [version['VersionLabel'] for version in helper.get_versions()]
if version_label in existing_version_labels:
return version_label
archive_file_name = None
if archive:
archive_file_name = os.path.basename(archive)
archive_files = get(env_config, 'archive.files', [])
# generate the archive externally
if get(env_config, 'archive.generate'):
cmd = get(env_config, 'archive.generate.cmd')
output_file = get(env_config, 'archive.generate.output_file')
use_shell = get(env_config, 'archive.generate.use_shell', True)
exit_code = get(env_config, 'archive.generate.exit_code', 0)
if not cmd or not output_file:
raise Exception('Archive generation requires cmd and output_file at a minimum')
output_regex = None
try:
output_regex = re.compile(output_file)
except:
pass
result = subprocess.call(cmd, shell=use_shell)
if result != exit_code:
            raise Exception('Generate command exited with code %s (expected %s)' % (result, exit_code))
if output_file and os.path.exists(output_file):
archive_file_name = os.path.basename(output_file)
else:
for root, dirs, files in os.walk(".", followlinks=True):
for f in files:
fullpath = os.path.join(root, f)
if fullpath.endswith(output_file):
archive = fullpath
archive_file_name = os.path.basename(fullpath)
break
elif output_regex and output_regex.match(fullpath):
archive = fullpath
archive_file_name = os.path.basename(fullpath)
break
if archive:
break
if not archive or not archive_file_name:
raise Exception('Unable to find expected output file matching: %s' % (output_file))
# create the archive
elif not archive:
if not directory:
directory = "."
includes = get(env_config, 'archive.includes', [])
excludes = get(env_config, 'archive.excludes', [])
def _predicate(f):
for exclude in excludes:
if re.match(exclude, f):
return False
if len(includes) > 0:
for include in includes:
if re.match(include, f):
return True
return False
return True
archive = create_archive(directory, str(version_label) + ".zip", config=archive_files, ignore_predicate=_predicate)
archive_file_name = str(version_label) + ".zip"
add_config_files_to_archive(directory, archive, config=archive_files)
helper.upload_archive(archive, archive_file_name)
helper.create_application_version(version_label, archive_file_name)
return version_label
def create_archive(directory, filename, config={}, ignore_predicate=None, ignored_files=['.git', '.svn']):
"""
Creates an archive from a directory and returns
the file that was created.
"""
with zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED) as zip_file:
root_len = len(os.path.abspath(directory))
# create it
out("Creating archive: " + str(filename))
for root, dirs, files in os.walk(directory, followlinks=True):
archive_root = os.path.abspath(root)[root_len + 1:]
for f in files:
fullpath = os.path.join(root, f)
archive_name = os.path.join(archive_root, f)
# ignore the file we're creating
if filename in fullpath:
continue
                # ignored files (e.g. VCS metadata): skip the whole file, not
                # just the current name in the inner loop
                if ignored_files is not None:
                    skip_file = False
                    for name in ignored_files:
                        if fullpath.endswith(name):
                            out("Skipping: " + str(name))
                            skip_file = True
                            break
                    if skip_file:
                        continue
# do predicate
if ignore_predicate is not None:
if not ignore_predicate(archive_name):
out("Skipping: " + str(archive_name))
continue
out("Adding: " + str(archive_name))
zip_file.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
return filename
def add_config_files_to_archive(directory, filename, config={}):
"""
Adds configuration files to an existing archive
"""
with zipfile.ZipFile(filename, 'a') as zip_file:
        for entry in config:
            for conf, tree in entry.items():
if tree.has_key('yaml'):
content = yaml.dump(tree['yaml'], default_flow_style=False)
else:
content = tree.get('content', '')
out("Adding file " + str(conf) + " to archive " + str(filename))
file_entry = zipfile.ZipInfo(conf)
file_entry.external_attr = tree.get('permissions', 0644) << 16L
zip_file.writestr(file_entry, content)
return filename
class AwsCredentials:
"""
Class for holding AwsCredentials
"""
def __init__(self, access_key, secret_key, security_token, region, bucket, bucket_path):
self.access_key = access_key
self.secret_key = secret_key
self.security_token = security_token
self.bucket = bucket
self.region = region
self.bucket_path = bucket_path
if not self.bucket_path.endswith('/'):
self.bucket_path += '/'
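# A minimal sketch of wiring AwsCredentials into the EbsHelper defined below;
# the credential strings, region, bucket, app name and wait time are all
# placeholders.
def _example_helper():
    aws = AwsCredentials(access_key='AKIA...', secret_key='...',
                         security_token=None, region='us-east-1',
                         bucket='my-deploy-bucket', bucket_path='apps')
    return EbsHelper(aws, wait_time_secs=900, app_name='my-app')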
class EbsHelper(object):
"""
Class for helping with ebs
"""
def __init__(self, aws, wait_time_secs, app_name=None,):
"""
Creates the EbsHelper
"""
self.aws = aws
self.ebs = connect_to_region(aws.region, aws_access_key_id=aws.access_key,
aws_secret_access_key=aws.secret_key,
security_token=aws.security_token)
self.autoscale = AutoScaleConnection(aws_access_key_id=aws.access_key,
aws_secret_access_key=aws.secret_key,
security_token=aws.security_token)
self.s3 = S3Connection(
aws_access_key_id=aws.access_key,
aws_secret_access_key=aws.secret_key,
security_token=aws.security_token,
host=(lambda r: 's3.amazonaws.com' if r == 'us-east-1' else 's3-' + r + '.amazonaws.com')(aws.region))
self.app_name = app_name
self.wait_time_secs = wait_time_secs
def swap_environment_cnames(self, from_env_name, to_env_name):
"""
Swaps cnames for an environment
"""
self.ebs.swap_environment_cnames(source_environment_name=from_env_name,
destination_environment_name=to_env_name)
def upload_archive(self, filename, key, auto_create_bucket=True):
"""
Uploads an application archive version to s3
"""
try:
bucket = self.s3.get_bucket(self.aws.bucket)
if ((
self.aws.region != 'us-east-1' and self.aws.region != 'eu-west-1') and bucket.get_location() != self.aws.region) or (
self.aws.region == 'us-east-1' and bucket.get_location() != '') or (
self.aws.region == 'eu-west-1' and bucket.get_location() != 'eu-west-1'):
raise Exception("Existing bucket doesn't match region")
except S3ResponseError:
bucket = self.s3.create_bucket(self.aws.bucket, location=self.aws.region)
def __report_upload_progress(sent, total):
if not sent:
sent = 0
if not total:
total = 0
out("Uploaded " + str(sent) + " bytes of " + str(total) \
+ " (" + str(int(float(max(1, sent)) / float(total) * 100)) + "%)")
# upload the new version
k = Key(bucket)
k.key = self.aws.bucket_path + key
k.set_metadata('time', str(time()))
k.set_contents_from_filename(filename, cb=__report_upload_progress, num_cb=10)
def list_available_solution_stacks(self):
"""
Returns a list of available solution stacks
"""
stacks = self.ebs.list_available_solution_stacks()
return stacks['ListAvailableSolutionStacksResponse']['ListAvailableSolutionStacksResult']['SolutionStacks']
def create_application(self, description=None):
"""
        Creates an application using the helper's current app_name
"""
out("Creating application " + str(self.app_name))
self.ebs.create_application(self.app_name, description=description)
def delete_application(self):
"""
        Deletes the application referenced by the helper's current
        app_name, force-terminating its environments
"""
out("Deleting application " + str(self.app_name))
self.ebs.delete_application(self.app_name, terminate_env_by_force=True)
def application_exists(self):
"""
Returns whether or not the given app_name exists
"""
response = self.ebs.describe_applications(application_names=[self.app_name])
return len(response['DescribeApplicationsResponse']['DescribeApplicationsResult']['Applications']) > 0
def create_environment(self, env_name, version_label=None,
solution_stack_name=None, cname_prefix=None, description=None,
option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'):
"""
Creates a new environment
"""
out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type))
self.ebs.create_environment(self.app_name, env_name,
version_label=version_label,
solution_stack_name=solution_stack_name,
cname_prefix=cname_prefix,
description=description,
option_settings=option_settings,
tier_type=tier_type,
tier_name=tier_name,
tier_version=tier_version)
def environment_exists(self, env_name, include_deleted=False):
"""
Returns whether or not the given environment exists
"""
response = self.ebs.describe_environments(application_name=self.app_name, environment_names=[env_name],
include_deleted=include_deleted)
return len(response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']) > 0 \
and response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments'][0][
'Status'] != 'Terminated'
def environment_resources(self, env_name):
"""
Returns the description for the given environment's resources
"""
resp = self.ebs.describe_environment_resources(environment_name=env_name)
return resp['DescribeEnvironmentResourcesResponse']['DescribeEnvironmentResourcesResult']['EnvironmentResources']
def get_env_sizing_metrics(self, env_name):
asg = self.get_asg(env_name)
if asg:
return asg.min_size, asg.max_size, asg.desired_capacity
else:
return None, None, None
def get_asg(self, env_name):
asg_name = self.get_asg_name(env_name)
asgs = self.autoscale.get_all_groups(names=[asg_name])
asg = None
if asgs:
asg = asgs[0]
return asg
def get_asg_name(self, env_name):
resources = self.environment_resources(env_name)
name = resources["AutoScalingGroups"][0]["Name"]
return name
def set_env_sizing_metrics(self, env_name, min_size, max_size):
self.update_environment(env_name, option_settings=[
("aws:autoscaling:asg", "MinSize", min_size), ("aws:autoscaling:asg", "MaxSize", max_size)])
def environment_data(self, env_name):
"""
Returns the description for the given environment
"""
response = self.ebs.describe_environments(application_name=self.app_name, environment_names=[env_name],
include_deleted=False)
return response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments'][0]
def rebuild_environment(self, env_name):
"""
Rebuilds an environment
"""
out("Rebuilding " + str(env_name))
self.ebs.rebuild_environment(environment_name=env_name)
def get_environments(self):
"""
Returns the environments
"""
response = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)
return response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
def delete_environment(self, environment_name):
"""
Deletes an environment
"""
self.ebs.terminate_environment(environment_name=environment_name, terminate_resources=True)
def update_environment(self, environment_name, description=None, option_settings=[], tier_type=None, tier_name=None,
tier_version='1.0'):
"""
        Updates an environment's option settings, description and tier
"""
out("Updating environment: " + str(environment_name))
messages = self.ebs.validate_configuration_settings(self.app_name, option_settings,
environment_name=environment_name)
messages = messages['ValidateConfigurationSettingsResponse']['ValidateConfigurationSettingsResult']['Messages']
ok = True
for message in messages:
if message['Severity'] == 'error':
ok = False
out("[" + message['Severity'] + "] " + str(environment_name) + " - '" \
+ message['Namespace'] + ":" + message['OptionName'] + "': " + message['Message'])
self.ebs.update_environment(
environment_name=environment_name,
description=description,
option_settings=option_settings,
tier_type=tier_type,
tier_name=tier_name,
tier_version=tier_version)
def get_previous_environment_for_subdomain(self, env_subdomain):
"""
        Returns the name of the non-terminated environment whose CNAME subdomain matches env_subdomain
"""
def sanitize_subdomain(subdomain):
return subdomain.lower()
env_subdomain = sanitize_subdomain(env_subdomain)
def match_cname(cname):
subdomain = sanitize_subdomain(cname.split(".")[0])
return subdomain == env_subdomain
def match_candidate(env):
return env['Status'] != 'Terminated' \
and env.get('CNAME') \
and match_cname(env['CNAME'])
envs = self.get_environments()
candidates = [env for env in envs if match_candidate(env)]
match = None
if candidates:
match = candidates[0]["EnvironmentName"]
return match
def deploy_version(self, environment_name, version_label):
"""
Deploys a version to an environment
"""
out("Deploying " + str(version_label) + " to " + str(environment_name))
self.ebs.update_environment(environment_name=environment_name, version_label=version_label)
def get_versions(self):
"""
Returns the versions available
"""
response = self.ebs.describe_application_versions(application_name=self.app_name)
return response['DescribeApplicationVersionsResponse']['DescribeApplicationVersionsResult']['ApplicationVersions']
def create_application_version(self, version_label, key):
"""
Creates an application version
"""
out("Creating application version " + str(version_label) + " for " + str(key))
self.ebs.create_application_version(self.app_name, version_label,
s3_bucket=self.aws.bucket, s3_key=self.aws.bucket_path+key)
def delete_unused_versions(self, versions_to_keep=10):
"""
Deletes unused versions
"""
# get versions in use
environments = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)
environments = environments['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
versions_in_use = []
for env in environments:
versions_in_use.append(env['VersionLabel'])
# get all versions
versions = self.ebs.describe_application_versions(application_name=self.app_name)
versions = versions['DescribeApplicationVersionsResponse']['DescribeApplicationVersionsResult'][
'ApplicationVersions']
versions = sorted(versions, reverse=True, cmp=lambda x, y: cmp(x['DateCreated'], y['DateCreated']))
        # delete old versions that are not in use
for version in versions[versions_to_keep:]:
if version['VersionLabel'] in versions_in_use:
out("Not deleting " + version["VersionLabel"] + " because it is in use")
else:
out("Deleting unused version: " + version["VersionLabel"])
self.ebs.delete_application_version(application_name=self.app_name,
version_label=version['VersionLabel'])
sleep(2)
def describe_events(self, environment_name, next_token=None, start_time=None):
"""
Describes events from the given environment
"""
events = self.ebs.describe_events(
application_name=self.app_name,
environment_name=environment_name,
next_token=next_token,
start_time=start_time)
return (events['DescribeEventsResponse']['DescribeEventsResult']['Events'], events['DescribeEventsResponse']['DescribeEventsResult']['NextToken'])
def wait_for_environments(self, environment_names, health=None, status=None, version_label=None,
include_deleted=True, use_events=True):
"""
Waits for an environment to have the given version_label
and to be in the green state
"""
# turn into a list
if not isinstance(environment_names, (list, tuple)):
environment_names = [environment_names]
environment_names = environment_names[:]
# print some stuff
s = "Waiting for environment(s) " + (", ".join(environment_names)) + " to"
if health is not None:
s += " have health " + health
else:
s += " have any health"
if version_label is not None:
s += " and have version " + version_label
if status is not None:
s += " and have status " + status
out(s)
started = time()
seen_events = list()
for env_name in environment_names:
(events, next_token) = self.describe_events(env_name, start_time=utcnow_isoformat())
for event in events:
seen_events.append(event)
delay = 10
while True:
# bail if they're all good
if len(environment_names) == 0:
break
# wait
sleep(delay)
# # get the env
try:
environments = self.ebs.describe_environments(
application_name=self.app_name,
environment_names=environment_names,
include_deleted=include_deleted)
except BotoServerError as e:
if not e.error_code == 'Throttling':
raise
delay = min(60, int(delay * 1.5))
out("Throttling: setting delay to " + str(delay) + " seconds")
continue
environments = environments['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
if len(environments) <= 0:
raise Exception("Couldn't find any environments")
# loop through and wait
for env in environments[:]:
env_name = env['EnvironmentName']
# the message
msg = "Environment " + env_name + " is " + str(env['Health'])
if version_label is not None:
msg = msg + " and has version " + str(env['VersionLabel'])
if status is not None:
msg = msg + " and has status " + str(env['Status'])
# what we're doing
good_to_go = True
if health is not None:
good_to_go = good_to_go and str(env['Health']) == health
if status is not None:
good_to_go = good_to_go and str(env['Status']) == status
if version_label is not None:
good_to_go = good_to_go and str(env['VersionLabel']) == version_label
# allow a certain number of Red samples before failing
if env['Status'] == 'Ready' and env['Health'] == 'Red':
if 'RedCount' not in env:
env['RedCount'] = 0
env['RedCount'] += 1
if env['RedCount'] > MAX_RED_SAMPLES:
out('Deploy failed')
raise Exception('Ready and red')
# log it
if good_to_go:
out(msg + " ... done")
environment_names.remove(env_name)
else:
out(msg + " ... waiting")
# log events
try:
(events, next_token) = self.describe_events(env_name, start_time=utcnow_isoformat())
except BotoServerError as e:
if not e.error_code == 'Throttling':
raise
delay = min(60, int(delay * 1.5))
out("Throttling: setting delay to " + str(delay) + " seconds")
break
for event in events:
if event not in seen_events:
out("["+event['Severity']+"] "+event['Message'])
seen_events.append(event)
# check the time
elapsed = time() - started
if elapsed > self.wait_time_secs:
message = "Wait time for environment(s) {environments} to be {health} expired".format(
environments=" and ".join(environment_names), health=(health or "Green")
)
raise Exception(message)
|
cookbrite/ebs-deploy
|
ebs_deploy/__init__.py
|
Python
|
mit
| 26,434 | 0.002951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Eduard Trott
# @Date: 2015-09-15 08:57:35
# @Email: etrott@redhat.com
# @Last modified by: etrott
# @Last Modified time: 2015-12-17 16:53:17
version_info = ('0', '0', '1')
__version__ = '.'.join(version_info[0:3]) # + '-' + version_info[3]
|
maybelinot/bellring
|
bellring/_version.py
|
Python
|
gpl-3.0
| 305 | 0 |
from a10sdk.common.A10BaseClass import A10BaseClass
class PortReservation(A10BaseClass):
"""Class Description::
DS-Lite Static Port Reservation.
Class port-reservation supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param nat_end_port: {"description": "NAT End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param inside: {"optional": false, "type": "string", "description": "Inside User Address and Port Range (DS-Lite Inside User's Tunnel Source IPv6 Address)", "format": "ipv6-address"}
:param tunnel_dest_address: {"optional": false, "type": "string", "description": "DS-Lite Inside User's Tunnel Destination IPv6 Address", "format": "ipv6-address"}
:param inside_start_port: {"description": "Inside Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat: {"optional": false, "type": "string", "description": "NAT Port Range (NAT IP address)", "format": "ipv4-address"}
:param inside_end_port: {"description": "Inside End Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param nat_start_port: {"description": "NAT Start Port", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": false}
:param inside_addr: {"optional": false, "type": "string", "description": "Inside User IP address", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "inside","tunnel_dest_address","inside_addr","inside_start_port","inside_end_port","nat","nat_start_port","nat_end_port"]
self.b_key = "port-reservation"
self.a10_url="/axapi/v3/cgnv6/ds-lite/port-reservation/{inside}+{tunnel_dest_address}+{inside_addr}+{inside_start_port}+{inside_end_port}+{nat}+{nat_start_port}+{nat_end_port}"
self.DeviceProxy = ""
self.nat_end_port = ""
self.uuid = ""
self.inside = ""
self.tunnel_dest_address = ""
self.inside_start_port = ""
self.nat = ""
self.inside_end_port = ""
self.nat_start_port = ""
self.inside_addr = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
amwelch/a10sdk-python
|
a10sdk/core/cgnv6/cgnv6_ds_lite_port_reservation.py
|
Python
|
apache-2.0
| 2,890 | 0.010035 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example dag that performs two refresh operations on a Tableau Workbook aka Extract. The first one
waits until it succeeds. The second does not wait since this is an asynchronous operation and we don't know
when the operation actually finishes. That's why we have a third task that polls only the job status.
"""
from datetime import timedelta
from airflow import DAG
from airflow.providers.tableau.operators.tableau_refresh_workbook import TableauRefreshWorkbookOperator
from airflow.providers.tableau.sensors.tableau_job_status import TableauJobStatusSensor
from airflow.utils.dates import days_ago
with DAG(
dag_id='example_tableau_refresh_workbook',
dagrun_timeout=timedelta(hours=2),
schedule_interval=None,
start_date=days_ago(2),
tags=['example'],
) as dag:
# Refreshes a workbook and waits until it succeeds.
task_refresh_workbook_blocking = TableauRefreshWorkbookOperator(
site_id='my_site',
workbook_name='MyWorkbook',
blocking=True,
task_id='refresh_tableau_workbook_blocking',
)
# Refreshes a workbook and does not wait until it succeeds.
task_refresh_workbook_non_blocking = TableauRefreshWorkbookOperator(
site_id='my_site',
workbook_name='MyWorkbook',
blocking=False,
task_id='refresh_tableau_workbook_non_blocking',
)
# The following task queries the status of the workbook refresh job until it succeeds.
task_check_job_status = TableauJobStatusSensor(
site_id='my_site',
job_id="{{ ti.xcom_pull(task_ids='refresh_tableau_workbook_non_blocking') }}",
task_id='check_tableau_job_status',
)
task_refresh_workbook_non_blocking >> task_check_job_status
|
dhuang/incubator-airflow
|
airflow/providers/tableau/example_dags/example_tableau_refresh_workbook.py
|
Python
|
apache-2.0
| 2,507 | 0.002792 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing keypairs.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django import http
from django.template.defaultfilters import slugify # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView # noqa
from django.views.generic import View # noqa
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import forms as project_forms
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateKeypair
template_name = 'project/access_and_security/keypairs/create.html'
success_url = 'horizon:project:access_and_security:keypairs:download'
def get_success_url(self):
return reverse(self.success_url,
kwargs={"keypair_name": self.request.POST['name']})
class ImportView(forms.ModalFormView):
form_class = project_forms.ImportKeypair
template_name = 'project/access_and_security/keypairs/import.html'
success_url = reverse_lazy('horizon:project:access_and_security:index')
def get_object_id(self, keypair):
return keypair.name
class DownloadView(TemplateView):
def get_context_data(self, keypair_name=None):
return {'keypair_name': keypair_name}
template_name = 'project/access_and_security/keypairs/download.html'
class GenerateView(View):
def get(self, request, keypair_name=None):
try:
keypair = api.nova.keypair_create(request, keypair_name)
except Exception:
redirect = reverse('horizon:project:access_and_security:index')
exceptions.handle(self.request,
_('Unable to create key pair: %(exc)s'),
redirect=redirect)
response = http.HttpResponse(content_type='application/binary')
response['Content-Disposition'] = \
'attachment; filename=%s.pem' % slugify(keypair.name)
response.write(keypair.private_key)
response['Content-Length'] = str(len(response.content))
return response
|
gochist/horizon
|
openstack_dashboard/dashboards/project/access_and_security/keypairs/views.py
|
Python
|
apache-2.0
| 3,012 | 0.000332 |
import os
import gc
import platform
import sys
import time
import tempfile
import warnings
from optparse import OptionParser
import gpaw.mpi as mpi
from gpaw.hooks import hooks
from gpaw import debug
from gpaw.version import version
def run():
description = ('Run the GPAW test suite. The test suite can be run in '
'parallel with MPI through gpaw-python. The test suite '
'supports 1, 2, 4 or 8 CPUs although some tests are '
'skipped for some parallelizations. If no TESTs are '
'given, run all tests supporting the parallelization.')
parser = OptionParser(usage='%prog [OPTION...] [TEST...]',
description=description,
version='%%prog %s' % version)
parser.add_option('-x', '--exclude',
type='string', default=None,
help='Exclude tests (comma separated list of tests).',
metavar='test1.py,test2.py,...')
parser.add_option('-f', '--run-failed-tests-only',
action='store_true',
help='Run failed tests only.')
parser.add_option('--from', metavar='TESTFILE', dest='from_test',
help='Run remaining tests, starting from TESTFILE')
parser.add_option('--after', metavar='TESTFILE', dest='after_test',
help='Run remaining tests, starting after TESTFILE')
parser.add_option('--range',
type='string', default=None,
help='Run tests in range test_i.py to test_j.py '
'(inclusive)',
metavar='test_i.py,test_j.py')
parser.add_option('-j', '--jobs', type='int', default=1,
help='Run JOBS threads. Each test will be executed '
'in serial by one thread. This option cannot be used '
'for parallelization together with MPI.')
parser.add_option('--reverse', action='store_true',
help=('Run tests in reverse order (less overhead with '
'multiple jobs)'))
parser.add_option('-k', '--keep-temp-dir', action='store_true',
dest='keep_tmpdir', help='Do not delete temporary files.')
parser.add_option('-d', '--directory', help='Run test in this directory')
parser.add_option('-s', '--show-output', action='store_true',
help='Show standard output from tests.')
opt, tests = parser.parse_args()
if len(tests) == 0:
from gpaw.test import tests
if opt.reverse:
tests.reverse()
if opt.run_failed_tests_only:
tests = [line.strip() for line in open('failed-tests.txt')]
exclude = []
if opt.exclude is not None:
exclude += opt.exclude.split(',')
if opt.from_test:
fromindex = tests.index(opt.from_test)
tests = tests[fromindex:]
if opt.after_test:
index = tests.index(opt.after_test) + 1
tests = tests[index:]
if opt.range:
# default start(stop) index is first(last) test
indices = opt.range.split(',')
try:
start_index = tests.index(indices[0])
except ValueError:
start_index = 0
try:
stop_index = tests.index(indices[1]) + 1
except ValueError:
stop_index = len(tests)
tests = tests[start_index:stop_index]
if opt.jobs > 1:
exclude.append('maxrss.py')
for test in exclude:
if test in tests:
tests.remove(test)
from gpaw.test import TestRunner
if mpi.world.size > 8:
if mpi.rank == 0:
message = '!!!!!!!\n' \
'GPAW regression test suite was not designed to run on more\n' \
'than 8 MPI tasks. Re-run test suite using 1, 2, 4 or 8 MPI\n' \
'tasks instead.'
warnings.warn(message, RuntimeWarning)
old_hooks = hooks.copy()
hooks.clear()
if mpi.rank == 0:
if opt.directory is None:
tmpdir = tempfile.mkdtemp(prefix='gpaw-test-')
else:
tmpdir = opt.directory
if os.path.isdir(tmpdir):
opt.keep_tmpdir = True
else:
os.mkdir(tmpdir)
else:
tmpdir = None
tmpdir = mpi.broadcast_string(tmpdir)
cwd = os.getcwd()
os.chdir(tmpdir)
operating_system = platform.system() + ' ' + platform.machine()
operating_system += ' ' + ' '.join(platform.dist())
python = platform.python_version() + ' ' + platform.python_compiler()
python += ' ' + ' '.join(platform.architecture())
if mpi.rank == 0:
print('python %s on %s' % (python, operating_system))
print('Running tests in %s' % tmpdir)
print('Jobs: %d, Cores: %d, debug-mode: %r' % (opt.jobs, mpi.size,
debug))
failed = TestRunner(tests, jobs=opt.jobs, show_output=opt.show_output).run()
os.chdir(cwd)
if mpi.rank == 0:
if len(failed) > 0:
open('failed-tests.txt', 'w').write('\n'.join(failed) + '\n')
elif not opt.keep_tmpdir:
os.system('rm -rf ' + tmpdir)
hooks.update(old_hooks.items())
return len(failed)
if __name__ == '__main__':
run()
|
robwarm/gpaw-symm
|
gpaw/test/test.py
|
Python
|
gpl-3.0
| 5,364 | 0.001119 |
"""
Copyright 2011 Marcus Fedarko
Contact Email: marcus.fedarko@gmail.com
This file is part of CAIP.
CAIP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CAIP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CAIP. If not, see <http://www.gnu.org/licenses/>.
====
LevelReader.py
-----
class LevelReader: reads through a level and
creates cells, which are added to a sprite group
in SpriteManager.
"""
import Cell
import Levels
from Config import *
import pygame
from pygame.locals import *
class LevelReader(object):
"""Reads through a given level list and sets up the cells
in the level."""
def __init__(self, levelnum, sprite_manager):
"""Sets some attributes of the LevelReader."""
self.levelnum = levelnum
self.sprite_manager = sprite_manager
# Dict relating topleft of cells to the cell.
# Used in CellManager.
self.topleftToCell = {}
def read(self):
"""Reads through Levels.level1 and creates cells."""
x = 0
y = 0
for string in Levels.level1:
for char in string:
if char == "#":
c = Cell.Cell((x, y), True)
else:
c = Cell.Cell((x, y), False)
self.sprite_manager.cells.add(c)
self.topleftToCell[(x, y)] = c
x += TILESIZE[0]
y += TILESIZE[1]
x = 0
|
fedarko/CAIP
|
Code/LevelReader.py
|
Python
|
gpl-3.0
| 1,886 | 0.003181 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# qrgen1.py
#
# Copyright 2013 psutton <zleap@zleap.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from qrtools import QR
myCode = QR(filename=u"/home/psutton/Documents/Python/qrcodes/qrcode.png")
if myCode.decode():
print myCode.data
print myCode.data_type
print myCode.data_to_string()
|
zleap/python-qrcode
|
qrreadfromfile.py
|
Python
|
gpl-3.0
| 1,039 | 0.008662 |
import xml.etree.ElementTree as ET
import datetime
import sys
import openpyxl
import re
import dateutil.parser
# The worksheet handle is shared by the helper functions below; it is set in main().
sheet = None
def main():
    print 'Number of arguments:', len(sys.argv), 'arguments.' #DEBUG
    print 'Argument List:', str(sys.argv) #DEBUG
    payRate = raw_input("Enter your pay rate: ") #DEBUG
    sNumber = raw_input("Enter 900#: ") #DEBUG
    xml = ET.parse("xml.xml") #DEBUG
    root = xml.getroot()
    root = root[3][0] #Go directly to worksheet/table
    global sheet
    sheet = openpyxl.load_workbook(sys.argv[1], data_only=True).active
    writeName(root)
    writeEmployeeNum(root)
    writeStudentNum(sNumber)
    writePayRate(payRate)
    #At this point all that is left are the times
    dates = []
    for x in root.findall(".//*"):
        if x.text is not None:
            dates.append(x.text)
    for x in char_range('G','Z'):
        writeTimes(x + '17', dates)
def writeTimes(position, dateList):
    match = next(x[0] for x in enumerate(dateList) if x[1] == sheet[position].value)
    jobCode = dateList[match + 4]
    if jobCode == 900:
        raise ValueError("Cannot start day with 900 break")
    else:
        # Assumption: the rounded clock time belongs back in the cell that held
        # the raw timestamp (the original statement was left unfinished).
        sheet[position] = roundTime(dateList[match])
def roundTime(time):
    # Round the parsed timestamp to the nearest quarter hour.
    date = dateutil.parser.parse(time)
    if date.minute <= 7:
        return date.replace(minute=0)
    elif date.minute >= 8 and date.minute <= 22:
        return date.replace(minute=15)
    elif date.minute >= 23 and date.minute <= 37:
        return date.replace(minute=30)
    elif date.minute >= 38 and date.minute <= 52:
        return date.replace(minute=45)
    elif date.minute >= 53:
        if date.hour == 23:
            raise ValueError("Worked overnight or did not clock out")
        else:
            date += datetime.timedelta(minutes=(60 - date.minute))
            #Rounds time to next hour by adding minutes until 60
            return date
    else:
        raise ValueError("Something went wrong in roundTime")
def writeName(tree):
    name = tree[-1][4][0].text
    sheet['I8'] = name
def writeEmployeeNum(tree):
    num = tree[2][0][0].text
    # openpyxl cell references are column-then-row ('D4', 'S8', 'K6')
    sheet['D4'] = re.match('.*?([0-9]+)$', num).group(1)
def writeStudentNum(num):
    sheet['S8'] = num
def writePayRate(num):
    sheet['K6'] = num
def char_range(c1, c2):
    """Generates the characters from `c1` to `c2`, inclusive."""
    """Courtesy http://stackoverflow.com/questions/7001144/range-over-character-in-python"""
    for c in xrange(ord(c1), ord(c2)+1):
        yield chr(c)
main()
|
JamesPavek/payroll
|
timesheet.py
|
Python
|
mit
| 2,615 | 0.047419 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TtTrip.shape'
db.add_column(u'timetable_tttrip', 'shape',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['timetable.TtShape'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TtTrip.shape'
db.delete_column(u'timetable_tttrip', 'shape_id')
models = {
u'timetable.ttshape': {
'Meta': {'object_name': 'TtShape'},
'gtfs_shape_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.TextField', [], {})
},
u'timetable.ttstop': {
'Meta': {'object_name': 'TtStop'},
'gtfs_stop_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop_lat': ('django.db.models.fields.FloatField', [], {}),
'stop_lon': ('django.db.models.fields.FloatField', [], {}),
'stop_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stop_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'timetable.ttstoptime': {
'Meta': {'object_name': 'TtStopTime'},
'exp_arrival': ('django.db.models.fields.DateTimeField', [], {}),
'exp_departure': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtStop']"}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtTrip']"})
},
u'timetable.tttrip': {
'Meta': {'object_name': 'TtTrip'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gtfs_trip_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['timetable.TtShape']", 'null': 'True'})
}
}
complete_apps = ['timetable']
|
hasadna/OpenTrain
|
webserver/opentrain/timetable/migrations/0013_auto__add_field_tttrip_shape.py
|
Python
|
bsd-3-clause
| 2,893 | 0.005876 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugger Wrapper Session Consisting of a Local Curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
# Google-internal import(s).
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import stepper_cli
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
_DUMP_ROOT_PREFIX = "tfdbg_"
class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self, sess, dump_root=None, log_usage=True, ui_type="curses"):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards.
log_usage: (`bool`) whether the usage of this class is to be logged.
ui_type: (`str`) requested UI type. Currently supported:
(curses | readline)
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
dump_root is a file.
"""
if log_usage:
pass # No logging for open-source.
framework.BaseDebugWrapperSession.__init__(self, sess)
if dump_root is None:
self._dump_root = tempfile.mktemp(prefix=_DUMP_ROOT_PREFIX)
else:
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
# a filter is in effect, this object will call run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._is_run_start = True
self._ui_type = ui_type
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Invoke stepper (cont, step, breakpoint, etc.)",
usage=argparse.SUPPRESS)
self._argparsers["invoke_stepper"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Invoke the CLI to let user choose what action to take:
`run` / `invoke_stepper`.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
Raises:
RuntimeError: If user chooses to prematurely exit the debugger.
"""
self._is_run_start = True
self._update_run_calls_state(request.run_call_count, request.fetches,
request.feed_dict)
if self._active_tensor_filter:
# If we are running till a filter passes, we just need to keep running
# with the DEBUG_RUN option.
return framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls())
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls())
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli()
if self._run_through_times > 1:
self._run_through_times -= 1
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
return self._run_start_response
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cli = ui_factory.get_ui(self._ui_type)
help_intro = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
# Show logo at the onset of the first run.
help_intro.extend(cli_shared.get_tfdbg_logo())
help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
help_intro.extend(self._run_info)
self._run_cli.set_help_intro(help_intro)
# Create initial screen output detailing the run.
self._title = "run-start: " + self._run_description
self._init_command = "run_info"
self._title_color = "blue_on_white"
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
request: An instance of OnSessionInitRequest.
Returns:
An instance of OnSessionInitResponse.
"""
self._is_run_start = False
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
debug_dump.set_python_graph(self._sess.graph)
passed_filter = None
if self._active_tensor_filter:
if not debug_dump.find(
self._tensor_filters[self._active_tensor_filter], first_n=1):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
self._remove_dump_root()
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
passed_filter = self._active_tensor_filter
self._active_tensor_filter = None
self._prep_cli_for_run_end(debug_dump, request.tf_error, passed_filter)
self._run_start_response = self._launch_cli()
# Clean up the dump generated by this run.
self._remove_dump_root()
else:
# No debug information to show following a non-debug run() call.
self._run_start_response = None
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _prep_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
"""Prepare (but not launch) CLI for run-end, with debug dump from the run.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
run.
tf_error: (None or OpError) OpError that happened during the run() call
(if any).
passed_filter: (None or str) Name of the tensor filter that just passed
and caused the preparation of this run-end CLI (if any).
"""
if tf_error:
help_intro = cli_shared.get_error_intro(tf_error)
self._init_command = "help"
self._title_color = "red_on_white"
else:
help_intro = None
self._init_command = "lt"
self._title_color = "black_on_white"
if passed_filter is not None:
# Some dumped tensor(s) from this run passed the filter.
self._init_command = "lt -f %s" % passed_filter
self._title_color = "red_on_white"
self._run_cli = analyzer_cli.create_analyzer_ui(
debug_dump, self._tensor_filters, ui_type=self._ui_type)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
self._run_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
self._title = "run-end: " + self._run_description
if help_intro:
self._run_cli.set_help_intro(help_intro)
def _launch_cli(self):
"""Launch the interactive command-line interface.
Returns:
The OnRunStartResponse specified by the user using the "run" command.
"""
self._register_this_run_info(self._run_cli)
response = self._run_cli.run_ui(
init_command=self._init_command,
title=self._title,
title_color=self._title_color)
return response
def _run_info_handler(self, args, screen_info=None):
output = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
output.extend(cli_shared.get_tfdbg_logo())
output.extend(self._run_info)
if (not self._is_run_start and
debugger_cli_common.MAIN_MENU_KEY in output.annotations):
menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY]
if "list_tensors" not in menu.captions():
menu.insert(
0, debugger_cli_common.MenuItem("list_tensors", "list_tensors"))
return output
def _run_handler(self, args, screen_info=None):
"""Command handler for "run" command during on-run-start."""
_ = screen_info # Currently unused.
parsed = self._argparsers["run"].parse_args(args)
if parsed.till_filter_pass:
# For the run-till-bad-numerical-value-appears mode, use the DEBUG_RUN
# option to access the intermediate tensors, and set the corresponding
# state flag of the class itself to True.
if parsed.till_filter_pass in self._tensor_filters:
action = framework.OnRunStartAction.DEBUG_RUN
self._active_tensor_filter = parsed.till_filter_pass
else:
# Handle invalid filter name.
return debugger_cli_common.RichTextLines(
["ERROR: tensor filter \"%s\" does not exist." %
parsed.till_filter_pass])
self._skip_debug = parsed.no_debug
self._run_through_times = parsed.times
if parsed.times > 1 or parsed.no_debug:
# If requested -t times > 1, the very next run will be a non-debug run.
action = framework.OnRunStartAction.NON_DEBUG_RUN
debug_urls = []
else:
action = framework.OnRunStartAction.DEBUG_RUN
debug_urls = self._get_run_debug_urls()
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(action, debug_urls))
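  # Illustrative command lines for the handler above (not part of the original
  # module; the flag spellings are assumptions based on the parsed attribute
  # names, since the argparse setup lives earlier in this class):
  #   run                    -> DEBUG_RUN with file:// debug URLs
  #   run -n                 -> NON_DEBUG_RUN, no tensors are dumped
  #   run -t 3               -> the next run(s) are non-debug until the
  #                             countdown in _update_run_calls_state expires
  #   run -f <filter_name>   -> DEBUG_RUN until the named tensor filter passes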
def _register_this_run_info(self, curses_cli):
curses_cli.register_command_handler(
"run",
self._run_handler,
self._argparsers["run"].format_help(),
prefix_aliases=["r"])
curses_cli.register_command_handler(
"invoke_stepper",
self._on_run_start_step_handler,
self._argparsers["invoke_stepper"].format_help(),
prefix_aliases=["s"])
curses_cli.register_command_handler(
"run_info",
self._run_info_handler,
self._argparsers["run_info"].format_help(),
prefix_aliases=["ri"])
if self._tensor_filters:
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
def _on_run_start_step_handler(self, args, screen_info=None):
"""Command handler for "invoke_stepper" command during on-run-start."""
_ = screen_info # Currently unused.
# No parsing is currently necessary for invoke_stepper. This may change
# in the future when the command has arguments.
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.INVOKE_STEPPER, []))
def _get_run_debug_urls(self):
"""Get the debug_urls value for the current run() call.
Returns:
debug_urls: (list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
"""
return ["file://" + self._dump_root]
def _update_run_calls_state(self, run_call_count, fetches, feed_dict):
"""Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
      feed_dict: None or a dict. This is the feed_dict argument to the run()
call.
"""
self._run_call_count = run_call_count
self._run_description = cli_shared.get_run_short_description(run_call_count,
fetches,
feed_dict)
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(run_call_count,
fetches,
feed_dict,
self._tensor_filters)
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Overrides method in base class to implement interactive node stepper.
Args:
node_stepper: (`stepper.NodeStepper`) The underlying NodeStepper API
object.
restore_variable_values_on_exit: (`bool`) Whether any variables whose
values have been altered during this node-stepper invocation should be
restored to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
stepper = stepper_cli.NodeStepperCLI(node_stepper)
# On exiting the node-stepper CLI, the finalize method of the node_stepper
# object will be called, ensuring that the state of the graph will be the
# same as if the stepping did not happen.
# TODO(cais): Perhaps some users will want the effect of the interactive
# stepping and value injection to persist. When that happens, make the call
# to finalize optional.
stepper_ui = ui_factory.get_ui(
self._ui_type,
on_ui_exit=(node_stepper.restore_variable_values if
restore_variable_values_on_exit else None))
stepper_ui.register_command_handler(
"list_sorted_nodes",
stepper.list_sorted_nodes,
stepper.arg_parsers["list_sorted_nodes"].format_help(),
prefix_aliases=["lt", "lsn"])
stepper_ui.register_command_handler(
"cont",
stepper.cont,
stepper.arg_parsers["cont"].format_help(),
prefix_aliases=["ct", "c"])
stepper_ui.register_command_handler(
"step",
stepper.step,
stepper.arg_parsers["step"].format_help(),
prefix_aliases=["st", "s"])
stepper_ui.register_command_handler(
"print_tensor",
stepper.print_tensor,
stepper.arg_parsers["print_tensor"].format_help(),
prefix_aliases=["pt"])
stepper_ui.register_command_handler(
"inject_value",
stepper.inject_value,
stepper.arg_parsers["inject_value"].format_help(),
prefix_aliases=["inject", "override_value", "override"])
# Register tab completion candidates.
stepper_ui.register_tab_comp_context([
"cont", "ct", "c", "pt", "inject_value", "inject", "override_value",
"override"
], [str(elem) for elem in node_stepper.sorted_nodes()])
# TODO(cais): Tie up register_tab_comp_context to a single alias to shorten
# calls like this.
return stepper_ui.run_ui(
init_command="lt",
title="Node Stepper: " + self._run_description,
title_color="blue_on_white")
|
HKUST-SING/tensorflow
|
tensorflow/python/debug/wrappers/local_cli_wrapper.py
|
Python
|
apache-2.0
| 20,594 | 0.004079 |
from __future__ import division
from pySDC.Hooks import hooks
from pySDC.Stats import stats
import matplotlib.pyplot as plt
import numpy as np
class particles_output(hooks):
def __init__(self):
"""
Initialization of particles output
"""
super(particles_output,self).__init__()
# add figure object for further use
fig = plt.figure()
self.ax = fig.add_subplot(111)
self.ax.set_xlim([-1.5,1.5])
self.ax.set_ylim([-1.5,1.5])
plt.ion()
self.sframe = None
def dump_step(self,status):
"""
        Overwrite the standard dump routine called after each step
Args:
status: status object per step
"""
super(particles_output,self).dump_step(status)
# some abbreviations
L = self.level
u = L.uend
R = np.linalg.norm(u.pos.values)
H = 1/2*np.dot(u.vel.values,u.vel.values)+0.02/R
stats.add_to_stats(step=status.step, time=status.time, type='energy', value=H)
oldcol = self.sframe
# self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1],L.uend.pos.values[2])
self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1])
# Remove old line collection before drawing
if oldcol is not None:
self.ax.collections.remove(oldcol)
plt.pause(0.00001)
return None
|
torbjoernk/pySDC
|
examples/spiraling_particle/HookClass.py
|
Python
|
bsd-2-clause
| 1,411 | 0.008505 |
__version__ = "1.0.3"
|
flashingpumpkin/filerotate
|
filerotate/__version__.py
|
Python
|
mit
| 21 | 0.047619 |
# -*- Mode: Python; test-case-name: flumotion.test.test_admin_multi -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from flumotion.common import testsuite
from twisted.internet import defer
from flumotion.admin import multi
from flumotion.common import connection
from flumotion.twisted import pb
class MultiAdminTest(testsuite.TestCaseWithManager):
def testConstructor(self):
model = multi.MultiAdminModel()
def testConnectSuccess(self):
def connected(_):
self.assertEqual(len(self.vishnu.adminHeaven.avatars),
1)
return m.removeManager(str(self.connectionInfo))
m = multi.MultiAdminModel()
d = m.addManager(self.connectionInfo, writeConnection=False)
d.addCallback(connected)
return d
def testConnectFailure(self):
def connected(_):
self.fail('should not have connected')
def failure(f):
# ok!
self.assertEqual(len(self.vishnu.adminHeaven.avatars), 0)
self.assertEqual(m.admins, {})
self.assertEqual(m._reconnectHandlerIds, {})
m = multi.MultiAdminModel()
i = connection.PBConnectionInfo(self.connectionInfo.host,
self.connectionInfo.port,
self.connectionInfo.use_ssl,
pb.Authenticator(username='user',
password='pest'))
d = m.addManager(i, writeConnection=False)
d.addCallbacks(connected, failure)
return d
def testReconnect(self):
class Listener:
def __init__(self):
self.disconnectDeferred = defer.Deferred()
self.reconnectDeferred = defer.Deferred()
def model_addPlanet(self, admin, planet):
self.reconnectDeferred.callback(admin)
self.reconnectDeferred = None
def model_removePlanet(self, admin, planet):
self.disconnectDeferred.callback(admin)
self.disconnectDeferred = None
Listener = Listener()
def connected(_):
self.assertEqual(len(self.vishnu.adminHeaven.avatars),
1)
a = m.admins[str(self.connectionInfo)]
m.addListener(Listener)
a.clientFactory.disconnect()
return Listener.disconnectDeferred
def disconnected(_):
return Listener.reconnectDeferred
def reconnected(_):
m.removeListener(Listener)
return m.removeManager(str(self.connectionInfo))
m = multi.MultiAdminModel()
d = m.addManager(self.connectionInfo, writeConnection=False)
d.addCallback(connected)
d.addCallback(disconnected)
d.addCallback(reconnected)
return d
|
flyapen/UgFlu
|
flumotion/test/test_admin_multi.py
|
Python
|
gpl-2.0
| 3,725 | 0 |
# -*- coding: UTF-8 -*-
#
# (c) 2010 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from time import time
START_TIME = time()
|
jfmorcillo/mss
|
mss/agent/__init__.py
|
Python
|
gpl-3.0
| 844 | 0 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import web_contents
DEFAULT_TAB_TIMEOUT = 60
class Tab(web_contents.WebContents):
"""Represents a tab in the browser
The important parts of the Tab object are in the runtime and page objects.
E.g.:
# Navigates the tab to a given url.
tab.Navigate('http://www.google.com/')
# Evaluates 1+1 in the tab's JavaScript context.
tab.Evaluate('1+1')
"""
def __init__(self, inspector_backend):
super(Tab, self).__init__(inspector_backend)
def __del__(self):
super(Tab, self).__del__()
@property
def dom_stats(self):
"""A dictionary populated with measured DOM statistics.
Currently this dictionary contains:
{
'document_count': integer,
'node_count': integer,
'event_listener_count': integer
}
"""
dom_counters = self._inspector_backend.GetDOMStats(
timeout=DEFAULT_TAB_TIMEOUT)
assert (len(dom_counters) == 3 and
all([x in dom_counters for x in ['document_count', 'node_count',
'event_listener_count']]))
return dom_counters
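  # Illustrative return value (not taken from a real page):
  #   tab.dom_stats -> {'document_count': 1,
  #                     'node_count': 250,
  #                     'event_listener_count': 12}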
def Activate(self):
"""Brings this tab to the foreground asynchronously.
Not all browsers or browser versions support this method.
Be sure to check browser.supports_tab_control.
Please note: this is asynchronous. There is a delay between this call
and the page's documentVisibilityState becoming 'visible', and yet more
delay until the actual tab is visible to the user. None of these delays
are included in this call."""
self._inspector_backend.Activate()
@property
def screenshot_supported(self):
"""True if the browser instance is capable of capturing screenshots"""
return self._inspector_backend.screenshot_supported
def Screenshot(self, timeout=DEFAULT_TAB_TIMEOUT):
"""Capture a screenshot of the window for rendering validation"""
return self._inspector_backend.Screenshot(timeout)
def PerformActionAndWaitForNavigate(
self, action_function, timeout=DEFAULT_TAB_TIMEOUT):
"""Executes action_function, and waits for the navigation to complete.
action_function must be a Python function that results in a navigation.
This function returns when the navigation is complete or when
the timeout has been exceeded.
"""
self._inspector_backend.PerformActionAndWaitForNavigate(
action_function, timeout)
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout=DEFAULT_TAB_TIMEOUT):
"""Navigates to url.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
self._inspector_backend.Navigate(url, script_to_evaluate_on_commit, timeout)
def GetCookieByName(self, name, timeout=DEFAULT_TAB_TIMEOUT):
"""Returns the value of the cookie by the given |name|."""
return self._inspector_backend.GetCookieByName(name, timeout)
|
codenote/chromium-test
|
tools/telemetry/telemetry/core/tab.py
|
Python
|
bsd-3-clause
| 3,230 | 0.005263 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
from rally.deployment.engines import devstack
from tests.unit import test
SAMPLE_CONFIG = {
"type": "DevstackEngine",
"provider": {
"name": "ExistingServers",
"credentials": [{"user": "root", "host": "example.com"}],
},
"localrc": {
"ADMIN_PASSWORD": "secret",
},
}
DEVSTACK_REPO = "https://git.openstack.org/openstack-dev/devstack"
class DevstackEngineTestCase(test.TestCase):
def setUp(self):
super(DevstackEngineTestCase, self).setUp()
self.deployment = {
"uuid": "de641026-dbe3-4abe-844a-ffef930a600a",
"config": SAMPLE_CONFIG,
}
self.engine = devstack.DevstackEngine(self.deployment)
def test_invalid_config(self):
self.deployment = SAMPLE_CONFIG.copy()
self.deployment["config"] = {"type": 42}
engine = devstack.DevstackEngine(self.deployment)
self.assertRaises(jsonschema.ValidationError,
engine.validate)
def test_construct(self):
self.assertEqual(self.engine.localrc["ADMIN_PASSWORD"], "secret")
@mock.patch("rally.deployment.engines.devstack.open", create=True)
def test_prepare_server(self, mock_open):
mock_open.return_value = "fake_file"
server = mock.Mock()
server.password = "secret"
self.engine.prepare_server(server)
calls = [
mock.call("/bin/sh -e", stdin="fake_file"),
mock.call("chpasswd", stdin="rally:secret"),
]
self.assertEqual(calls, server.ssh.run.mock_calls)
filename = mock_open.mock_calls[0][1][0]
self.assertTrue(filename.endswith("rally/deployment/engines/"
"devstack/install.sh"))
self.assertEqual([mock.call(filename, "rb")], mock_open.mock_calls)
@mock.patch("rally.deployment.engine.Engine.get_provider")
@mock.patch("rally.deployment.engines.devstack.get_updated_server")
@mock.patch("rally.deployment.engines.devstack.get_script")
@mock.patch("rally.deployment.serverprovider.provider.Server")
@mock.patch("rally.deployment.engines.devstack.objects.Endpoint")
def test_deploy(self, mock_endpoint, mock_server, mock_get_script,
mock_get_updated_server, mock_engine_get_provider):
mock_engine_get_provider.return_value = fake_provider = (
mock.Mock()
)
server = mock.Mock(host="host")
mock_endpoint.return_value = "fake_endpoint"
mock_get_updated_server.return_value = ds_server = mock.Mock()
mock_get_script.return_value = "fake_script"
server.get_credentials.return_value = "fake_credentials"
fake_provider.create_servers.return_value = [server]
with mock.patch.object(self.engine, "deployment") as mock_deployment:
endpoints = self.engine.deploy()
self.assertEqual({"admin": "fake_endpoint"}, endpoints)
mock_endpoint.assert_called_once_with(
"http://host:5000/v2.0/", "admin", "secret", "admin", "admin")
mock_deployment.add_resource.assert_called_once_with(
info="fake_credentials",
provider_name="DevstackEngine",
type="credentials")
repo = "https://git.openstack.org/openstack-dev/devstack"
cmd = "/bin/sh -e -s %s master" % repo
server.ssh.run.assert_called_once_with(cmd, stdin="fake_script")
ds_calls = [
mock.call.ssh.run("cat > ~/devstack/localrc", stdin=mock.ANY),
mock.call.ssh.run("~/devstack/stack.sh")
]
self.assertEqual(ds_calls, ds_server.mock_calls)
localrc = ds_server.mock_calls[0][2]["stdin"]
self.assertIn("ADMIN_PASSWORD=secret", localrc)
|
afaheem88/rally
|
tests/unit/deployment/engines/test_devstack.py
|
Python
|
apache-2.0
| 4,402 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 40000, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 40000, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False,
"Train using fp16 instead of fp32.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
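# Illustrative example (not part of the original script): with the buckets
# above, a source sentence of 8 token ids paired with a target of 12 token ids
# (13 after the EOS id is appended) is placed in the (10, 15) bucket, because
# read_data() keeps the first bucket with len(source) < source_size and
# len(target) < target_size.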
def read_data(source_path, target_path, max_size=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
    max_size: maximum number of lines to read, all others will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids) < source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size,
FLAGS.fr_vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.initialize_all_variables())
return model
def train():
"""Train a en->fr translation model using WMT data."""
# Prepare WMT data.
print("Preparing WMT data in %s" % FLAGS.data_dir)
en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_data(
FLAGS.data_dir, FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
with tf.Session() as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(en_dev, fr_dev)
train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
    # the size of the i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
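    # Worked example (illustrative): if train_bucket_sizes were [10, 20, 30, 40],
    # train_total_size would be 100.0 and train_buckets_scale would be
    # [0.1, 0.3, 0.6, 1.0]; a random number of 0.45 would then select bucket 2.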
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
print ("global step %d learning rate %.4f step-time %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
dev_set, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(float(eval_loss)) if eval_loss < 300 else float(
"inf")
print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
sys.stdout.flush()
def decode():
with tf.Session() as sess:
# Create model and load parameters.
model = create_model(sess, True)
model.batch_size = 1 # We decode one sentence at a time.
# Load vocabularies.
en_vocab_path = os.path.join(FLAGS.data_dir,
"vocab%d.en" % FLAGS.en_vocab_size)
fr_vocab_path = os.path.join(FLAGS.data_dir,
"vocab%d.fr" % FLAGS.fr_vocab_size)
en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
_, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
# Decode from standard input.
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
# Which bucket does it belong to?
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
# Print out French sentence corresponding to outputs.
print(" ".join([tf.compat.as_str(rev_fr_vocab[output]) for output in outputs]))
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
def self_test():
"""Test the translation model."""
with tf.Session() as sess:
print("Self-test for neural translation model.")
# Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
5.0, 32, 0.3, 0.99, num_samples=8)
sess.run(tf.initialize_all_variables())
# Fake data set for both the (3, 3) and (6, 6) bucket.
data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
[([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
for _ in xrange(5): # Train the fake model for 5 steps.
bucket_id = random.choice([0, 1])
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
data_set, bucket_id)
model.step(sess, encoder_inputs, decoder_inputs, target_weights,
bucket_id, False)
def main(_):
if FLAGS.self_test:
self_test()
elif FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
tf.app.run()
|
Klaminite1337/Paragon
|
inc/VOCAL/translate.py
|
Python
|
mit
| 12,795 | 0.005784 |
"""Abstract CarType.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from smartcard.Exceptions import InvalidATRMaskLengthException
from smartcard.System import readers
from smartcard.util import toHexString
class CardType(object):
"""Abstract base class for CardTypes.
Known sub-classes: L{smartcard.CardType.AnyCardType}
L{smartcard.CardType.ATRCardType}."""
def __init__(self):
"""CardType constructor."""
pass
def matches(self, atr, reader=None):
"""Returns true if atr and card connected match the CardType.
@param atr: the atr to chek for matching
@param reader: the reader (optional); default is None
The reader can be use in some sub-classes to do advanced
matching that require connecting to the card."""
pass
class AnyCardType(CardType):
"""The AnyCardType matches any card."""
def __init__(self):
super().__init__()
def matches(self, atr, reader=None):
"""Always returns true, i.e. AnyCardType matches any card.
        @param atr: the atr to check for matching
@param reader: the reader (optional); default is None"""
return True
class ATRCardType(CardType):
"""The ATRCardType defines a card from an ATR and a mask."""
def __init__(self, atr, mask=None):
"""ATRCardType constructor.
@param atr: the ATR of the CardType
@param mask: an optional mask to be applied to the ATR for
L{CardType} matching default is None
"""
super().__init__()
self.atr = list(atr)
self.mask = mask
if mask is None:
self.maskedatr = self.atr
else:
if len(self.atr) != len(self.mask):
raise InvalidATRMaskLengthException(toHexString(mask))
self.maskedatr = list(map(lambda x, y: x & y, self.atr, self.mask))
def matches(self, atr, reader=None):
"""Returns true if the atr matches the masked CardType atr.
        @param atr: the atr to check for matching
@param reader: the reader (optional); default is None
When atr is compared to the CardType ATR, matches returns true if
and only if CardType.atr & CardType.mask = atr & CardType.mask,
where & is the bitwise logical AND."""
if len(atr) != len(self.atr):
            return False
if self.mask is not None:
maskedatr = list(map(lambda x, y: x & y, list(atr), self.mask))
else:
maskedatr = atr
return self.maskedatr == maskedatr
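    # Illustrative example (not part of the original module):
    #   ct = ATRCardType([0x3B, 0x16, 0x94], mask=[0xFF, 0xFF, 0x00])
    #   ct.matches([0x3B, 0x16, 0x42])  # True: the last byte is masked out
    #   ct.matches([0x3B, 0x17, 0x94])  # False: the second byte differs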
if __name__ == '__main__':
"""Small sample illustrating the use of CardType.py."""
r = readers()
print(r)
connection = r[0].createConnection()
connection.connect()
atrct = ATRCardType([0x3B, 0x16, 0x94, 0x20, 0x02, 0x01, 0x00, 0x00, 0x0D])
print(atrct.matches(connection.getATR()))
|
LudovicRousseau/pyscard
|
smartcard/CardType.py
|
Python
|
lgpl-2.1
| 3,695 | 0 |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for producing network counter messages from Neutron notification
events.
"""
import oslo.messaging
from oslo_config import cfg
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer.openstack.common import log
from ceilometer import sample
OPTS = [
cfg.StrOpt('neutron_control_exchange',
default='neutron',
help="Exchange name for Neutron notifications.",
deprecated_name='quantum_control_exchange'),
]
cfg.CONF.register_opts(OPTS)
LOG = log.getLogger(__name__)
class NetworkNotificationBase(plugin_base.NotificationBase):
resource_name = None
@property
def event_types(self):
return [
# NOTE(flwang): When the *.create.start notification sending,
# there is no resource id assigned by Neutron yet. So we ignore
# the *.create.start notification for now and only listen the
# *.create.end to make sure the resource id is existed.
'%s.create.end' % self.resource_name,
'%s.update.*' % self.resource_name,
'%s.exists' % self.resource_name,
# FIXME(dhellmann): Neutron delete notifications do
# not include the same metadata as the other messages,
# so we ignore them for now. This isn't ideal, since
# it may mean we miss charging for some amount of time,
# but it is better than throwing away the existing
# metadata for a resource when it is deleted.
# '%s.delete.start' % (self.resource_name),
]
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target
This sequence is defining the exchange and topics to be connected for
this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.neutron_control_exchange)
for topic in conf.notification_topics]
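    # For instance (illustrative): with the default 'neutron' exchange and
    # conf.notification_topics = ['notifications'], get_targets() returns a
    # single oslo.messaging.Target(topic='notifications', exchange='neutron').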
def process_notification(self, message):
LOG.info(_('network notification %r') % message)
counter_name = getattr(self, 'counter_name', self.resource_name)
unit_value = getattr(self, 'unit', self.resource_name)
resource = message['payload'].get(self.resource_name)
if resource:
# NOTE(liusheng): In %s.update.start notifications, the id is in
# message['payload'] instead of resource itself.
if message['event_type'].endswith('update.start'):
resource['id'] = message['payload']['id']
resources = [resource]
else:
resources = message['payload'].get(self.resource_name + 's')
resource_message = message.copy()
for resource in resources:
resource_message['payload'] = resource
yield sample.Sample.from_notification(
name=counter_name,
type=sample.TYPE_GAUGE,
unit=unit_value,
volume=1,
user_id=resource_message['_context_user_id'],
project_id=resource_message['_context_tenant_id'],
resource_id=resource['id'],
message=resource_message)
event_type_split = resource_message['event_type'].split('.')
if len(event_type_split) > 2:
yield sample.Sample.from_notification(
name=counter_name
+ "." + event_type_split[1],
type=sample.TYPE_DELTA,
unit=unit_value,
volume=1,
user_id=resource_message['_context_user_id'],
project_id=resource_message['_context_tenant_id'],
resource_id=resource['id'],
message=resource_message)
class Network(NetworkNotificationBase):
"""Listen for Neutron network notifications.
Handle network.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'network'
class Subnet(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle subnet.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'subnet'
class Port(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle port.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'port'
class Router(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle router.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'router'
class FloatingIP(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle floatingip.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'floatingip'
counter_name = 'ip.floating'
unit = 'ip'
class Bandwidth(NetworkNotificationBase):
"""Listen for Neutron notifications.
Listen in order to mediate with the metering framework.
"""
event_types = ['l3.meter']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='bandwidth',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes'],
user_id=None,
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['label_id'],
message=message)
class Pool(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle pool.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'pool'
counter_name = 'network.services.lb.pool'
class Vip(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle vip.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'vip'
counter_name = 'network.services.lb.vip'
class Member(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle member.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'member'
counter_name = 'network.services.lb.member'
class HealthMonitor(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle health_monitor.{create.end|update.*|exists} notifications
from neutron.
"""
resource_name = 'health_monitor'
counter_name = 'network.services.lb.health_monitor'
class Firewall(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle firewall.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'firewall'
counter_name = 'network.services.firewall'
class FirewallPolicy(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle firewall_policy.{create.end|update.*|exists} notifications
from neutron.
"""
resource_name = 'firewall_policy'
counter_name = 'network.services.firewall.policy'
class FirewallRule(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle firewall_rule.{create.end|update.*|exists} notifications
from neutron.
"""
resource_name = 'firewall_rule'
counter_name = 'network.services.firewall.rule'
class VPNService(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle vpnservice.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'vpnservice'
counter_name = 'network.services.vpn'
class IPSecPolicy(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle pool.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'ipsecpolicy'
counter_name = 'network.services.vpn.ipsecpolicy'
class IKEPolicy(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle ikepolicy.{create.end|update.*|exists} notifications from neutron.
"""
resource_name = 'ikepolicy'
counter_name = 'network.services.vpn.ikepolicy'
class IPSecSiteConnection(NetworkNotificationBase):
"""Listen for Neutron notifications.
Handle ipsec_site_connection.{create.end|update.*|exists}
notifications from neutron.
"""
resource_name = 'ipsec_site_connection'
counter_name = 'network.services.vpn.connections'
|
Juniper/ceilometer
|
ceilometer/network/notifications.py
|
Python
|
apache-2.0
| 8,892 | 0 |
scores = [60, 73, 81, 95, 34]
n = 0
total = 0
for x in scores:
n += 1
total += x
avg = total/n
print("for loop print")
print(total)
print(avg)
i = 1
x = 0
while i <= 50:
x += 1
i += 1
print("while loop print")
print(x)
print(i)
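# Expected output (illustrative walk-through, not part of the original file):
# the for loop accumulates total = 343 and avg = 68.6 over the five scores,
# and the while loop ends with x = 50 and i = 51 after 50 iterations.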
|
flake123p/ProjectH
|
Python/_Basics2_/A02_for_while/for_while.py
|
Python
|
gpl-3.0
| 238 | 0.016807 |
# A program that has a list of six colors and chooses one at random. The user then has three chances to guess the right color. After the third attempt the program outputs "Nope. The color I was thinking of was..."
import random
# this is the function that will execute the program
def program():
# These are the constants declaring what the colors are.
RED = 'red'
BLUE = 'blue'
GREEN = 'green'
ORANGE = 'orange'
PURPLE = 'purple'
PINK = 'pink'
class Color:
pass
c1 = Color()
c2 = Color()
c3 = Color()
guesses_made = 0
    # This input lets the program address you by name.
c1.name = input('Hello! What is your name?\n')
c2.color = [BLUE, GREEN, RED, ORANGE, PURPLE, PINK]
# This randomizes what color is chosen
c2.color = random.choice(c2.color)
    print ('Well, {0}, I am thinking of one of six colors: blue, green, red, orange, purple and pink.'.format(c1.name))
while guesses_made < 3:
c3.guess = input('Take a guess: ')
guesses_made += 1
if c3.guess != c2.color:
print ('Your guess is wrong.')
if c3.guess == c2.color:
break
if c3.guess == c2.color:
print ('Good job, {0}! You guessed my color in {1} guesses!'.format(c1.name, guesses_made))
else:
print ('Nope. The color I was thinking of was {0}'.format(c2.color))
if __name__ == "__main__":
program()
|
starnes/Python
|
guessnameclass.py
|
Python
|
mit
| 1,452 | 0.006887 |
"""
Routines for watching files for changes
"""
from __future__ import print_function
from builtins import zip
import time
import os
def watch(files, timeout=None, poll=2):
"""
Watch a given file or collection of files
until one changes. Uses polling.
Inputs
======
files - Name of one or more files to watch
timeout - Optional timeout in seconds
(Default is no timeout)
poll - Optional polling interval in seconds
(Default is 2 seconds)
Returns
=======
The name of the first changed file,
or None if timed out before any changes
Examples
========
To watch one file, timing out after 60 seconds:
>>> watch('file1', timeout=60)
To watch 2 files, never timing out:
>>> watch(['file1', 'file2'])
Author: Ben Dudson <benjamin.dudson@york.ac.uk>
"""
# Get modification time of file(s)
try:
        if hasattr(files, '__iter__') and not isinstance(files, str):
            # Iterable collection of filenames (a plain string is a single file)
lastmod = [ os.stat(f).st_mtime for f in files ]
iterable = True
else:
# Not iterable -> just one file
lastmod = os.stat(files).st_mtime
iterable = False
except:
print("Can't test modified time. Wrong file name?")
raise
start_time = time.time()
running = True
while running:
sleepfor = poll
if timeout:
# Check if timeout will be reached before next poll
if time.time() - start_time + sleepfor > timeout:
# Adjust time so that finish at timeout
sleepfor = timeout - (time.time() - start_time)
running = False # Stop after next test
time.sleep(sleepfor)
if iterable:
for last_t, f in zip(lastmod, files):
# Get the new modification time
t = os.stat(f).st_mtime
if t > last_t + 1.0: # +1 to reduce risk of false alarms
# File has been modified
return f
else:
t = os.stat(files).st_mtime
if t > lastmod + 1.0:
return files
return None
|
erikgrinaker/BOUT-dev
|
tools/pylib/boututils/watch.py
|
Python
|
gpl-3.0
| 2,197 | 0.004096 |
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
class TestQuizResult(unittest.TestCase):
pass
|
mhbu50/erpnext
|
erpnext/education/doctype/quiz_result/test_quiz_result.py
|
Python
|
gpl-3.0
| 153 | 0.006536 |
#!/usr/bin/python
from utils import mathfont
import fontforge
nArySumCodePoint = 0x2211 # largeop operator
v = 3 * mathfont.em
f = mathfont.create("limits-lowerlimitbaselinedropmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = v
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 11 * mathfont.em
f = mathfont.create("limits-lowerlimitgapmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = v
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 5 * mathfont.em
f = mathfont.create("limits-upperlimitbaselinerisemin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = v
f.math.UpperLimitGapMin = 0
mathfont.save(f)
v = 7 * mathfont.em
f = mathfont.create("limits-upperlimitgapmin%d" % v)
mathfont.createSquareGlyph(f, nArySumCodePoint)
f.math.LowerLimitBaselineDropMin = 0
f.math.LowerLimitGapMin = 0
f.math.OverbarExtraAscender = 0
f.math.OverbarVerticalGap = 0
f.math.StretchStackBottomShiftDown = 0
f.math.StretchStackGapAboveMin = 0
f.math.StretchStackGapBelowMin = 0
f.math.StretchStackTopShiftUp = 0
f.math.UnderbarExtraDescender = 0
f.math.UnderbarVerticalGap = 0
f.math.UpperLimitBaselineRiseMin = 0
f.math.UpperLimitGapMin = v
mathfont.save(f)
|
shinglyu/servo
|
tests/wpt/web-platform-tests/mathml/tools/limits.py
|
Python
|
mpl-2.0
| 2,284 | 0.000438 |
import json
from zipfile import ZipFile
import uuid
import activity
import re
import os
from os.path import isfile, join
from os import listdir, makedirs
from os import path
import datetime
from S3utility.s3_notification_info import S3NotificationInfo
from provider.execution_context import Session
import requests
from provider.storage_provider import StorageContext
from provider.article_structure import ArticleInfo
import provider.lax_provider as lax_provider
class activity_VersionDateLookup(activity.activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
activity.activity.__init__(self, settings, logger, conn, token, activity_task)
self.name = "VersionDateLookup"
self.pretty_name = "Version Date Lookup"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = "Looks up version date on Lax endpoints and stores version date in session " \
"(Currently used in Silent corrections only)"
self.logger = logger
def do_activity(self, data=None):
try:
session = Session(self.settings)
version = session.get_value(data['run'], 'version')
filename = session.get_value(data['run'], 'filename_last_element')
article_structure = ArticleInfo(filename)
version_date, error = self.get_version(self.settings, article_structure, article_structure.article_id, version)
if error is not None:
self.logger.error(error)
self.emit_monitor_event(self.settings, article_structure.article_id, version, data['run'],
self.pretty_name, "error",
" ".join(("Error Looking up version article", article_structure.article_id,
"message:", error)))
return activity.activity.ACTIVITY_PERMANENT_FAILURE
self.emit_monitor_event(self.settings, article_structure.article_id, version, data['run'],
self.pretty_name, "end",
" ".join(("Finished Version Lookup for article", article_structure.article_id,
"version:", version)))
session.store_value(data['run'], 'update_date', version_date)
return activity.activity.ACTIVITY_SUCCESS
except Exception as e:
self.logger.exception("Exception when trying to Lookup next version")
self.emit_monitor_event(self.settings, article_structure.article_id, version, data['run'], self.pretty_name,
"error", " ".join(("Error looking up version for article",
article_structure.article_id, "message:", str(e))))
return activity.activity.ACTIVITY_PERMANENT_FAILURE
def get_version(self, settings, article_structure, article_id, version):
        version_date = None
        try:
version_date = article_structure.get_update_date_from_zip_filename()
if version_date:
return version_date, None
version_date = lax_provider.article_version_date_by_version(article_id, version, settings)
return version_date, None
except Exception as e:
error_message = "Exception when looking up version Date. Message: " + str(e)
return version_date, error_message
def execute_function(self, the_function, arg1, arg2):
return the_function(arg1, arg2)
|
gnott/elife-bot
|
activity/activity_VersionDateLookup.py
|
Python
|
mit
| 3,787 | 0.004489 |
from sympy import Symbol, sin, cos, diff
from pprint import pprint
theta = Symbol('theta')
tdot = Symbol('tdot')
xdot = Symbol('xdot')
u = Symbol('u')
m_p_ = Symbol('m_p_')
m_c_ = Symbol('m_c_')
g_ = Symbol('g_')
l_ = Symbol('l_')
xddot = (u + m_p_ * sin(theta) * (l_ * (tdot * tdot) + g_ * cos(theta))) / (m_c_ + m_p_ * (sin(theta) * sin(theta)))
tddot = - (l_ * m_p_ * cos(theta) * sin(theta) * (tdot * tdot) + u * cos(theta) + (m_c_ + m_p_) * g_ * \
sin(theta))/ (l_ * m_c_ + l_ * m_p_ * (sin(theta)*sin(theta)))
f = [
xdot, tdot, xddot, tddot
]
fx = [
# d/dx, d/dtheta, d/dxdot, d/dtdot
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, diff(xddot, theta), 0, diff(xddot, tdot)],
[0, diff(tddot, theta), 0, diff(tddot, tdot)]
]
fu = [
0, 0, diff(xddot, u), diff(tddot, u)
]
fuu = [
0, 0, diff(diff(xddot, u), u), diff(diff(tddot, u), u)
]
fxx = [
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
], # fx_x
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, theta), theta), 0, diff(diff(xddot, tdot), theta)],
[0, diff(diff(tddot, theta), theta), 0, diff(diff(tddot, tdot), theta)]
], # fx_theta
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
], # fx_xdot
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, theta), tdot), 0, diff(diff(xddot, tdot), tdot)],
[0, diff(diff(tddot, theta), tdot), 0, diff(diff(tddot, tdot), tdot)]
], # fx_tdot
]
fu = [
0, 0, diff(xddot, u), diff(tddot, u)
]
# fu = 0, 0, diff(xddot, u), diff(tddot, u)
fux = [
# d/dx, d/dtheta, d/dxdot, d/dtdot
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, u), theta), 0, diff(diff(xddot, u), tdot)],
[0, diff(diff(tddot, u), theta), 0, diff(diff(tddot, u), tdot)]
]
fxu = [
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, diff(diff(xddot, theta), u), 0, diff(diff(xddot, tdot), u)],
[0, diff(diff(tddot, theta), u), 0, diff(diff(tddot, tdot), u)]
]
pprint(fxx)
pprint(fux)
pprint(fuu)
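# Illustrative check (not part of the original script): the symbolic results
# can be evaluated numerically with subs(); at the upright equilibrium with
# zero input the cart acceleration vanishes, e.g.
#   xddot.subs({u: 0, theta: 0, tdot: 0, m_p_: 0.1, m_c_: 1.0, g_: 9.8, l_: 0.5})
#   # -> 0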
|
openhumanoids/exotica
|
exotations/dynamics_solvers/exotica_cartpole_dynamics_solver/scripts/gen_second_order_dynamics.py
|
Python
|
bsd-3-clause
| 2,089 | 0.004787 |
import xml.etree.ElementTree as ElementTree
import os.path
import sys
#
# is there a xmp sidecar file?
#
def get_xmp_filename(filename):
xmpfilename = False
# some xmp sidecar filenames are based on the original filename without extensions like .jpg or .jpeg
    filenamewithoutextension = '.'.join(filename.split('.')[:-1])
# check if a xmp sidecar file exists
if os.path.isfile(filename + ".xmp"):
xmpfilename = filename + ".xmp"
elif os.path.isfile(filename + ".XMP"):
xmpfilename = filename + ".XMP"
elif os.path.isfile(filenamewithoutextension + ".xmp"):
xmpfilename = filenamewithoutextension + ".xmp"
elif os.path.isfile(filenamewithoutextension + ".XMP"):
xmpfilename = filenamewithoutextension + ".XMP"
return xmpfilename
# Build path facets from filename
class enhance_xmp(object):
def process(self, parameters=None, data=None):
if parameters is None:
parameters = {}
if data is None:
data = {}
verbose = False
if 'verbose' in parameters:
if parameters['verbose']:
verbose = True
filename = parameters['filename']
#
# is there a xmp sidecar file?
#
xmpfilename = get_xmp_filename(filename)
if not xmpfilename:
if verbose:
print("No xmp sidecar file")
#
# read meta data of the xmp sidecar file (= xml + rdf)
#
if xmpfilename:
            creator = False
            headline = False
            location = False
            tags = []
if verbose:
print("Reading xmp sidecar file {}".format(xmpfilename))
try:
# Parse the xmp file with utf 8 encoding
parser = ElementTree.XMLParser(encoding="utf-8")
et = ElementTree.parse(xmpfilename, parser)
root = et.getroot()
# get author
try:
creator = root.findtext(
".//{http://purl.org/dc/elements/1.1/}creator")
if creator:
data['author_ss'] = creator
except BaseException as e:
sys.stderr.write("Exception while parsing creator from xmp {} {}".format(
xmpfilename, e.args[0]))
# get headline
try:
headline = root.findtext(
".//{http://ns.adobe.com/photoshop/1.0/}Headline")
if headline:
data['title_txt'] = headline
except BaseException as e:
sys.stderr.write("Exception while parsing headline from xmp {} {}".format(
xmpfilename, e.args[0]))
# get location
try:
location = root.findtext(
".//{http://iptc.org/std/Iptc4xmpCore/1.0/xmlns/}Location")
if location:
if 'locations_ss' in data:
data['locations_ss'].append(location)
else:
data['locations_ss'] = [location]
except BaseException as e:
sys.stderr.write("Exception while parsing location from xmp {} {}".format(
xmpfilename, e.args[0]))
# get tags (named "subject")
try:
for tag in root.findall(".//{http://purl.org/dc/elements/1.1/}subject/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Bag/{http://www.w3.org/1999/02/22-rdf-syntax-ns#}li"):
try:
if 'tag_ss' in data:
data['tag_ss'].append(tag.text)
else:
data['tag_ss'] = [tag.text]
except BaseException as e:
sys.stderr.write("Exception while parsing a tag from xmp {} {}".format(
xmpfilename, e.args[0]))
except BaseException as e:
sys.stderr.write("Exception while parsing tags from xmp {} {}".format(
xmpfilename, e.args[0]))
except BaseException as e:
sys.stderr.write("Exception while parsing xmp {} {}".format(
xmpfilename, e.args[0]))
return parameters, data
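    # Illustrative usage (not part of the original plugin; the path below is
    # hypothetical):
    #   parameters, data = enhance_xmp().process(
    #       parameters={'filename': '/photos/img_0001.jpg'}, data={})
    #   # If /photos/img_0001.jpg.xmp exists, data may then contain keys such
    #   # as 'author_ss', 'title_txt', 'locations_ss' and 'tag_ss'.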
|
opensemanticsearch/open-semantic-etl
|
src/opensemanticetl/enhance_xmp.py
|
Python
|
gpl-3.0
| 4,568 | 0.001751 |
import colorsys
import logging
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_LIGHTBULB, CATEGORY_FAN
from hackoort.bulb import Bulb
def hls2rgb(h, l, s):
"""Convert h, l, s in 0-1 range to rgb in 0-255
:param h: hue
:param l: luminance
:param s: saturation
:return: red, green, blue in 0-255 range
"""
rgb = colorsys.hls_to_rgb(h, l, s)
r, g, b = (int(color * 255) for color in rgb)
return r,g, b
def rgb2hls(r, g, b):
"""Convert r,g,b in 0-255 range to hls in 0.1
:param r: red
:param g: green
:param b: blue
:return: hue, luminance, saturation
"""
return colorsys.rgb_to_hls(r/255.0, g/255.0, b/255.0)
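# Quick sanity checks (illustrative, not part of the original module):
#   hls2rgb(0.0, 0.5, 1.0)  # -> (255, 0, 0), pure red
#   rgb2hls(0, 0, 255)      # -> (approximately 0.667, 0.5, 1.0), pure blue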
class OortColorBulb(Accessory):
category = CATEGORY_LIGHTBULB
def __init__(self, driver, name, bulb: Bulb):
"""
:param driver: pyhap driver
:param name: descriptive name
:param bulb: it has to be connected oort bulb
"""
super().__init__(driver, name)
self.status = bulb.status
self.hue, _, self.saturation = rgb2hls(
self.status.red, self.status.green, self.status.blue)
serv_light = self.add_preload_service(
'Lightbulb', chars=["On", "Brightness", "Hue", "Saturation"]
)
self.char_on = serv_light.configure_char(
'On', setter_callback=self.set_on, value=self.status.on,
getter_callback=self.get_on
)
self.char_brightness = serv_light.configure_char(
"Brightness", setter_callback=self.set_brightness,
value=self.status.brightness, getter_callback=self.get_brightness
)
self.char_brightness = serv_light.configure_char(
"Hue", setter_callback=self.set_hue,
)
self.char_brightness = serv_light.configure_char(
"Saturation", setter_callback=self.set_saturation,
)
self.bulb = bulb
def get_on(self):
return self.status.on
def set_on(self, value):
# logging.info("Setting bulb: %s", value)
self.bulb.onoff(value)
# if value and self.bulb.status.rgbon:
# self.bulb.set_rgb_onoff(0)
def get_brightness(self):
return self.status.brightness
def set_brightness(self, value):
"""
The corresponding value is an integer representing a percentage
of the maximum brightness.
:param value:
:return:
"""
# logging.info("Setting brightness value: %s", value)
self.bulb.set_brightness_pct(value)
def set_hue(self, value):
"""
The corresponding value is a floating point number in units
of arc degrees. Values range from 0 to 360, representing the color
spectrum starting from red, through yellow, green, cyan, blue,
and finally magenta, before wrapping back to red.
"""
self.hue = value/360.0
self.bulb.set_rgb(*hls2rgb(self.hue, 0.5, self.saturation))
logging.info("Hue: %s", value)
def set_saturation(self, value):
"""
The corresponding value is a percentage of maximum saturation.
:param value:
:return:
"""
self.saturation = value / 100.0
logging.info("Saturation: %s", value)
def stop(self):
self.bulb.disconnect()
class OortColorBulbSwitch(Accessory):
category = CATEGORY_FAN
def __init__(self, driver, name, bulb: Bulb):
"""
:param driver: pyhap driver
:param name: descriptive name
:param bulb: it has to be connected oort bulb
"""
super().__init__(driver, name)
self.status = bulb.status
self.hue, _, self.saturation = rgb2hls(
self.status.red, self.status.green, self.status.blue)
serv_light = self.add_preload_service(
'Fan', chars=["On", "RotationDirection", "RotationSpeed"]
)
self.char_on = serv_light.configure_char(
'On', setter_callback=self.set_fake_on,
value=1, getter_callback=self.get_fake_on
)
self.char_color_on = serv_light.configure_char(
'RotationDirection', setter_callback=self.set_color_on,
value=self.status.on,
getter_callback=self.get_color_on
)
self.char_temperature = serv_light.configure_char(
"RotationSpeed", setter_callback=self.set_temperature,
value=self.status.brightness, getter_callback=self.get_temperature
)
self.bulb = bulb
    def get_fake_on(self):
return 1
def set_fake_on(self, value):
pass
def get_color_on(self):
return self.status.rgbon
def set_color_on(self, value):
self.bulb.set_rgb_onoff(value)
def get_temperature(self):
return self.status.temperature
def set_temperature(self, value):
self.bulb.set_temperature_pct(value)
|
emsi/hackoort
|
python/oorthap/bulb.py
|
Python
|
gpl-3.0
| 4,950 | 0.000606 |
import redis
import copy
import json
def basic_init(self):
self.sep = "["
self.rel_sep = ":"
self.label_sep = "]"
self.namespace = []
class Build_Configuration(object):
def __init__( self, redis_handle):
self.redis_handle = redis_handle
self.delete_all()
self.keys = set()
basic_init(self)
def build_namespace( self,name ):
return_value = copy.deepcopy(self.namespace)
return_value.append(name)
return return_value
def pop_namespace( self ):
del self.namespace[-1]
def add_header_node( self, relation,label=None, properties = {}, json_flag= True ):
if label== None:
label = relation
properties["name"] = label
self.construct_node( True, relation, label, properties, json_flag )
def end_header_node( self, assert_namespace ):
        assert (assert_namespace == self.namespace[-1][0]), "mismatched namespace: got " + assert_namespace + " expected " + self.namespace[-1][0]
del self.namespace[-1]
def check_namespace( self ):
assert len(self.namespace) == 0, "unbalanced name space, current namespace: "+ json.dumps(self.namespace)
#print ("name space is in balance")
def add_info_node( self, relation,label, properties = {}, json_flag= True ):
self.construct_node( False, relation, label, properties, json_flag )
# concept of namespace name is a string which ensures unique name
# the name is essentially the directory structure of the tree
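    # Hedged illustration (editorial addition): with the separators set in basic_init
    # ("[", ":", "]"), nesting HEAD -> Level_1:level11 -> Level_2:level21 produces the
    # Redis hash key "[HEAD:HEAD][Level_1:level11][Level_2:level21]", the same key
    # queried in the __main__ demo at the bottom of this file.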
def construct_node(self, push_namespace,relationship, label, properties, json_flag = True ):
redis_key, new_name_space = self.construct_basic_node( self.namespace, relationship,label )
if redis_key in self.keys:
raise ValueError("Duplicate Key")
self.keys.add(redis_key)
for i in properties.keys():
temp = json.dumps(properties[i] )
self.redis_handle.hset(redis_key, i, temp )
if push_namespace == True:
self.namespace = new_name_space
def _convert_namespace( self, namespace):
temp_value = []
for i in namespace:
temp_value.append(self.make_string_key( i[0],i[1] ))
key_string = self.sep+self.sep.join(temp_value)
return key_string
def construct_basic_node( self, namespace, relationship,label ): #tested
new_name_space = copy.copy(namespace)
new_name_space.append( [ relationship,label ] )
redis_string = self._convert_namespace(new_name_space)
self.redis_handle.hset(redis_string,"namespace",json.dumps(redis_string))
self.redis_handle.hset(redis_string,"name",json.dumps(label))
self.update_terminals( relationship, label, redis_string)
self.update_relationship( new_name_space, redis_string )
return redis_string, new_name_space
def make_string_key( self, relationship,label):
return relationship+self.rel_sep+label+self.label_sep
def update_relationship( self, new_name_space, redis_string ):
for relationship,label in new_name_space:
#print( relationship,label,redis_string)
self.redis_handle.sadd("@RELATIONSHIPS",relationship)
self.redis_handle.sadd("%"+relationship,redis_string)
self.redis_handle.sadd("#"+relationship+self.rel_sep+label,redis_string)
def update_terminals( self, relationship,label, redis_string ):
self.redis_handle.sadd("@TERMINALS",relationship)
self.redis_handle.sadd("&"+relationship,redis_string)
self.redis_handle.sadd("$"+relationship+self.rel_sep+label,redis_string)
def store_keys( self ):
for i in self.keys:
self.redis_handle.sadd("@GRAPH_KEYS", i )
def delete_all(self): #tested
self.redis_handle.flushdb()
class Query_Configuration(object):
def __init__( self, redis_handle):
self.redis_handle = redis_handle
basic_init(self)
def to_dictionary( self, list, key, json_flag = False ):
return_value = {}
for i in list:
if json_flag == True:
i = json.loads(i)
return_value[i[key]] = i
return return_value
def match_terminal_relationship( self, relationship, label= None , starting_set = None,property_values = None, data_flag = True ):
return_value = None
#print("initial starting set",starting_set)
if starting_set == None:
starting_set = self.redis_handle.smembers("@GRAPH_KEYS")
#print("starting set",starting_set)#
if label == None:
#print("made it here")
if self.redis_handle.sismember( "@TERMINALS", relationship) == True:
#print("made it here #2")
return_value = set(self.redis_handle.smembers("&"+relationship))
#print("return_value 1",return_value)
#print( starting_set)
return_value = return_value.intersection(starting_set)
#print("return_value",return_value)
else:
if self.redis_handle.sismember( "@TERMINALS", relationship) == True:
if self.redis_handle.exists("$"+relationship+self.rel_sep+label) == True:
return_value = self.redis_handle.smembers("$"+relationship+self.rel_sep+label)
return_value = return_value.intersection(starting_set)
if (property_values != None) and (return_value != None):
return_value = self.match_properties( return_value , property_values )
if data_flag == True:
return_value = self.return_data( return_value)
return return_value
def match_relationship( self, relationship, label= None , starting_set = None ):
return_value = None
if starting_set == None:
starting_set = self.redis_handle.smembers("@GRAPH_KEYS")
#print("starting set",starting_set)#
if label == None:
#print("made it here")
if self.redis_handle.sismember( "@RELATIONSHIPS", relationship) == True:
#print("made it here #2")
return_value = set(self.redis_handle.smembers("%"+relationship))
#print("return_value 1",return_value)
#print( starting_set)
return_value = return_value.intersection(starting_set)
else:
if self.redis_handle.sismember( "@RELATIONSHIPS", relationship) == True:
if self.redis_handle.exists("#"+relationship+self.rel_sep+label) == True:
return_value = self.redis_handle.smembers("#"+relationship+self.rel_sep+label)
return_value = return_value.intersection(starting_set)
return return_value
def match_properties( self, starting_set , property_values ):
return_value = []
for i in list(starting_set):
flag = True
for j , value in property_values.items():
data = self.redis_handle.hget(i,j)
if data == None:
flag = False
break
if json.loads(data) != value:
flag = False
break
if flag == True:
return_value.append( i)
return return_value
def match_relationship_list ( self, relationship_list, starting_set = None, property_values = None, fetch_values = True ):
for relationship ,label in relationship_list:
starting_set = self.match_relationship( relationship, label, starting_set )
if property_values != None:
starting_set = self.match_properties( starting_set, property_values )
if fetch_values == True:
return_value = self.return_data( starting_set)
else:
return_value = starting_set
return return_value
def return_data( self, key_set ):
return_value = []
for i in key_set:
data = self.redis_handle.hgetall(i)
temp = {}
for j in data.keys():
try:
temp[j] = json.loads(data[j] )
except:
#print("exception")
temp[j] = data[j]
return_value.append(temp)
return return_value
def modify_properties( self, redis_key, new_properties):
for i , value in new_properties.items():
self.redis_handle.hset(redis_key,i, value )
def form_dict_from_list( self, list_set, dict_property ):
return_value = {}
for i in list_set:
return_value[i[dict_property]] = i
return return_value
def form_key_list( self,key, property_array ):
return_value = []
for i in property_array:
return_value.append(i[key])
return return_value
if __name__ == "__main__":
redis_handle = redis.StrictRedis( host = "127.0.0.1", port=6379, db = 11 , decode_responses=True)
bc = Build_Configuration( redis_handle)
qc = Query_Configuration(redis_handle)
bc.construct_node( True, "HEAD","HEAD",{})
bc.construct_node( True, "Level_1","level11",{})
bc.construct_node( True, "Level_2","level21",{} )
bc.pop_namespace()
bc.construct_node( True, "Level_2","level12",{})
bc.construct_node( True, "Level_3","level33",{} )
bc.store_keys()
#print ("nodes ",redis_handle.keys("*]"))
#print ("system",redis_handle.keys("@*"))
#print ("relations",redis_handle.keys("%*"))
#print ("labels",redis_handle.keys("#*"))
#print ("all redis keys", redis_handle.keys("*"))
print("single relationship", qc.match_relationship("Level_1"))
print("single relationship-label", qc.match_relationship("Level_3","level33"))
x = qc.match_relationship_list( [["Level_1","level11"],["Level_2","level12"]],fetch_values= True)
print ("multiple relationship")
for i in x:
print( i )
x = qc.match_relationship_list( [["Level_1","level11"]],property_values={"name":"level21"},fetch_values= True)
print ("multiple relationship")
for i in x:
print( i )
new_properties = {"speed":10,"acc":32.2 }
qc.modify_properties( '[HEAD:HEAD][Level_1:level11][Level_2:level21]', new_properties)
print( redis_handle.hgetall('[HEAD:HEAD][Level_1:level11][Level_2:level21]'))
print (qc.match_terminal_relationship( "Level_2", label= None , starting_set = None ))
print (qc.match_terminal_relationship( "Level_2", label= "level21" , starting_set = None ))
print (qc.match_terminal_relationship( "Level_2", label= None , starting_set = None ,data_flag = False))
print (qc.match_terminal_relationship( "Level_2", label= "level21" , starting_set = None,data_flag = False ))
pv = {"speed":10,"acc":32.2}
print (qc.match_terminal_relationship( "Level_2", label= "level21" ,property_values = pv, starting_set = None ))
|
glenn-edgar/local_controller_3
|
redis_graph_py3/redis_graph_functions.py
|
Python
|
mit
| 10,886 | 0.054106 |
from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_K, htr_to_meV, htr_to_eV
import argparser
import norm_k
import numpy as np
import scf
import system
args = argparser.read_argument('Evaluate step-like feature in electron-phonon coupling')
thres = args.thres / htr_to_meV
beta = htr_to_K / args.temp
Sigma = system.make_data(args.dft, args.vb)
Sigma.bose_einstein = bose_einstein(Sigma.freq, beta)
for energy_meV in np.arange(0.0, args.energy, 0.5):
energy = energy_meV / htr_to_meV
kk = norm_k.eval(Sigma.eff_mass, energy)
Sigma_in = 1e-3j / htr_to_meV
Sigma_out, it = scf.self_energy(args.method, thres, Sigma, kk, Sigma_in)
if args.vb: real_energy = -energy
else: real_energy = energy
print(real_energy * htr_to_meV, -Sigma_out.imag * htr_to_meV, it)
|
mmdg-oxford/papers
|
Schlipf-PRL-2018/model/step.py
|
Python
|
gpl-3.0
| 830 | 0.012048 |
# From https://gist.github.com/destan/5540702#file-text2png-py
# coding=utf8
import multiprocessing
import threading
import time
import atexit
import os
import vmprof
def install_vmprof(name="thread"):
cpid = multiprocessing.current_process().name
ctid = threading.current_thread().name
fname = "vmprof-{}-{}-{}-{}.dat".format(name, cpid, ctid, time.time())
flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
outfd = os.open(fname, flags)
vmprof.enable(outfd, period=0.01)
# atexit.register(close_profile_file)
def close_profile_file():
print("Closing VMProf!")
vmprof.disable()
print("VMProf closed!")
|
fake-name/ReadableWebProxy
|
Misc/install_vmprof.py
|
Python
|
bsd-3-clause
| 616 | 0.021104 |
#!/usr/bin/env python3
from urllib import request
from pymongo import Connection
import argparse
import json
import pymongo
req = request.urlopen('https://blockchain.info/no/api/receive?method=create&address=19J9J4QHDun5YgUTfEU1qb3fSHTbCwcjGj')
encoding = req.headers.get_content_charset()
obj = json.loads(req.read().decode(encoding))
print(obj['input_address'])
parser = argparse.ArgumentParser()
parser.add_argument("--price")
parser.add_argument("--name")
parser.add_argument("--description")
args = parser.parse_args()
price = float(args.price) * 100000000
connection=Connection()
database=connection['bitcoin']
mycollection=database.entries
post={"Address":(obj['input_address']), "Price":price, "Name":args.name, "Description":args.description, "Confirmed":"No"}
mycollection.insert(post)
|
roypur/python-bitcoin-accounting
|
new.py
|
Python
|
gpl-3.0
| 797 | 0.015056 |
from marshmallow import Schema, fields, post_load
from marshmallow_enum import EnumField
from enum import IntFlag
from .. import models
from commandment.inventory import models as inventory_models
class ErrorChainItem(Schema):
LocalizedDescription = fields.String()
USEnglishDescription = fields.String()
ErrorDomain = fields.String()
ErrorCode = fields.Number()
class CommandResponse(Schema):
Status = fields.String()
UDID = fields.UUID()
CommandUUID = fields.UUID()
ErrorChain = fields.Nested(ErrorChainItem, many=True)
class OrganizationInfo(Schema):
pass
class AutoSetupAdminAccount(Schema):
GUID = fields.UUID()
shortName = fields.String()
class OSUpdateSettings(Schema):
CatalogURL = fields.String()
IsDefaultCatalog = fields.Boolean()
PreviousScanDate = fields.Date()
PreviousScanResult = fields.String()
PerformPeriodicCheck = fields.Boolean()
AutomaticCheckEnabled = fields.Boolean()
BackgroundDownloadEnabled = fields.Boolean()
AutomaticAppInstallationEnabled = fields.Boolean()
AutomaticOSInstallationEnabled = fields.Boolean()
AutomaticSecurityUpdatesEnabled = fields.Boolean()
class DeviceInformation(Schema):
# Table 5
UDID = fields.String(attribute='udid')
# Languages
DeviceID = fields.String(attribute='device_id')
OrganizationInfo = fields.Nested(OrganizationInfo)
LastCloudBackupDate = fields.Date(attribute='last_cloud_backup_date')
AwaitingConfiguration = fields.Boolean(attribute='awaiting_configuration')
AutoSetupAdminAccounts = fields.Nested(AutoSetupAdminAccount, many=True)
# Table 6
iTunesStoreAccountIsActive = fields.Boolean(attribute='itunes_store_account_is_active')
iTunesStoreAccountHash = fields.String(attribute='itunes_store_account_hash')
# Table 7
DeviceName = fields.String(attribute='device_name')
OSVersion = fields.String(attribute='os_version')
BuildVersion = fields.String(attribute='build_version')
ModelName = fields.String(attribute='model_name')
Model = fields.String(attribute='model')
ProductName = fields.String(attribute='product_name')
SerialNumber = fields.String(attribute='serial_number')
DeviceCapacity = fields.Float(attribute='device_capacity')
AvailableDeviceCapacity = fields.Float(attribute='available_device_capacity')
BatteryLevel = fields.Float(attribute='battery_level')
CellularTechnology = fields.Integer(attribute='cellular_technology')
IMEI = fields.String(attribute='imei')
MEID = fields.String(attribute='meid')
ModemFirmwareVersion = fields.String(attribute='modem_firmware_version')
IsSupervised = fields.Boolean(attribute='is_supervised')
IsDeviceLocatorServiceEnabled = fields.Boolean(attribute='is_device_locator_service_enabled')
IsActivationLockEnabled = fields.Boolean(attribute='is_activation_lock_enabled')
IsDoNotDisturbInEffect = fields.Boolean(attribute='is_do_not_disturb_in_effect')
EASDeviceIdentifier = fields.String(attribute='eas_device_identifier')
IsCloudBackupEnabled = fields.Boolean(attribute='is_cloud_backup_enabled')
OSUpdateSettings = fields.Nested(OSUpdateSettings, attribute='os_update_settings') # T8
LocalHostName = fields.String(attribute='local_hostname')
HostName = fields.String(attribute='hostname')
SystemIntegrityProtectionEnabled = fields.Boolean(attribute='sip_enabled')
# Array of str
#ActiveManagedUsers = fields.Nested(ActiveManagedUser)
IsMDMLostModeEnabled = fields.Boolean(attribute='is_mdm_lost_mode_enabled')
MaximumResidentUsers = fields.Integer(attribute='maximum_resident_users')
# Table 9
ICCID = fields.String(attribute='iccid')
BluetoothMAC = fields.String(attribute='bluetooth_mac')
WiFiMAC = fields.String(attribute='wifi_mac')
EthernetMACs = fields.String(attribute='ethernet_macs', many=True)
CurrentCarrierNetwork = fields.String(attribute='current_carrier_network')
SIMCarrierNetwork = fields.String(attribute='sim_carrier_network')
SubscriberCarrierNetwork = fields.String(attribute='subscriber_carrier_network')
CarrierSettingsVersion = fields.String(attribute='carrier_settings_version')
PhoneNumber = fields.String(attribute='phone_number')
VoiceRoamingEnabled = fields.Boolean(attribute='voice_roaming_enabled')
DataRoamingEnabled = fields.Boolean(attribute='data_roaming_enabled')
IsRoaming = fields.Boolean(attribute='is_roaming')
PersonalHotspotEnabled = fields.Boolean(attribute='personal_hotspot_enabled')
SubscriberMCC = fields.String(attribute='subscriber_mcc')
SubscriberMNC = fields.String(attribute='subscriber_mnc')
CurrentMCC = fields.String(attribute='current_mcc')
CurrentMNC = fields.String(attribute='current_mnc')
# @post_load
# def make_device(self, data):
# return models.Device(**data)
class DeviceInformationResponse(CommandResponse):
QueryResponses = fields.Nested(DeviceInformation)
class HardwareEncryptionCaps(IntFlag):
Nothing = 0
BlockLevelEncryption = 1
FileLevelEncryption = 2
All = BlockLevelEncryption | FileLevelEncryption
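# Hedged note (editorial addition): because HardwareEncryptionCaps is an IntFlag,
# its values combine bitwise, e.g. HardwareEncryptionCaps(3) is
# HardwareEncryptionCaps.All (BlockLevelEncryption | FileLevelEncryption).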
class FirewallApplicationItem(Schema):
BundleID = fields.String()
Allowed = fields.Boolean()
Name = fields.String()
class FirewallSettings(Schema):
FirewallEnabled = fields.Boolean()
BlockAllIncoming = fields.Boolean()
StealthMode = fields.Boolean()
Applications = fields.Nested(FirewallApplicationItem, many=True)
class SecurityInfoResponse(CommandResponse):
HardwareEncryptionCaps = EnumField(HardwareEncryptionCaps)
PasscodePresent = fields.Boolean()
PasscodeCompliant = fields.Boolean()
PasscodeCompliantWithProfiles = fields.Boolean()
PasscodeLockGracePeriodEnforced = fields.Integer()
FDE_Enabled = fields.Boolean()
FDE_HasPersonalRecoveryKey = fields.Boolean()
FDE_HasInstitutionalRecoveryKey = fields.Boolean()
FirewallSettings = fields.Nested(FirewallSettings)
SystemIntegrityProtectionEnabled = fields.Boolean()
class InstalledApplication(Schema):
Identifier = fields.String(attribute='bundle_identifier')
Version = fields.String(attribute='version')
ShortVersion = fields.String(attribute='short_version')
Name = fields.String(attribute='name')
BundleSize = fields.Integer(attribute='bundle_size')
DynamicSize = fields.Integer(attribute='dynamic_size')
IsValidated = fields.Boolean(attribute='is_validated')
ExternalVersionIdentifier = fields.String(attribute='external_version_identifier') # iOS 11
@post_load
def make_installed_application(self, data: dict) -> inventory_models.InstalledApplication:
return inventory_models.InstalledApplication(**data)
class InstalledApplicationListResponse(CommandResponse):
InstalledApplicationList = fields.Nested(InstalledApplication, many=True)
class CertificateListItem(Schema):
CommonName = fields.String()
IsIdentity = fields.Boolean()
Data = fields.String()
@post_load
def make_installed_certificate(self, data: dict) -> inventory_models.InstalledCertificate:
return inventory_models.InstalledCertificate(**data)
class CertificateListResponse(CommandResponse):
CertificateList = fields.Nested(CertificateListItem, many=True)
class AvailableOSUpdate(Schema):
AllowsInstallLater = fields.Boolean(attribute='allows_install_later')
AppIdentifiersToClose = fields.List(fields.String, attribute='app_identifiers_to_close', many=True)
HumanReadableName = fields.String(attribute='human_readable_name')
HumanReadableNameLocale = fields.String(attribute='human_readable_name_locale')
IsConfigDataUpdate = fields.Boolean(attribute='is_config_data_update')
IsCritical = fields.Boolean(attribute='is_critical')
IsFirmwareUpdate = fields.Boolean(attribute='is_firmware_update')
MetadataURL = fields.String(attribute='metadata_url')
ProductKey = fields.String(attribute='product_key')
RestartRequired = fields.Boolean(attribute='restart_required')
Version = fields.String(attribute='version')
@post_load
def make_available_os_update(self, data: dict) -> models.AvailableOSUpdate:
return models.AvailableOSUpdate(**data)
class AvailableOSUpdateListResponse(CommandResponse):
AvailableOSUpdates = fields.Nested(AvailableOSUpdate, many=True)
class ProfileListPayloadItem(Schema):
PayloadDescription = fields.String(attribute='description')
PayloadDisplayName = fields.String(attribute='display_name')
PayloadIdentifier = fields.String(attribute='identifier')
PayloadOrganization = fields.String(attribute='organization')
PayloadType = fields.String(attribute='payload_type')
PayloadUUID = fields.UUID(attribute='uuid')
# PayloadVersion = fields.Integer(attribute='payload_version')
@post_load
def make_installed_payload(self, data: dict) -> inventory_models.InstalledPayload:
return inventory_models.InstalledPayload(**data)
class ProfileListItem(Schema):
HasRemovalPasscode = fields.Boolean(attribute='has_removal_password')
IsEncrypted = fields.Boolean(attribute='is_encrypted')
PayloadDescription = fields.String(attribute='payload_description')
PayloadDisplayName = fields.String(attribute='payload_display_name')
PayloadIdentifier = fields.String(attribute='payload_identifier')
PayloadOrganization = fields.String(attribute='payload_organization')
PayloadRemovalDisallowed = fields.Boolean(attribute='payload_removal_disallowed')
PayloadUUID = fields.UUID(attribute='payload_uuid')
# PayloadVersion = fields.Integer(attribute='payload_version')
#SignerCertificates = fields.Nested(attribute='signer_certificates', many=True)
PayloadContent = fields.Nested(ProfileListPayloadItem, attribute='payload_content', many=True)
@post_load
def make_installed_profile(self, data: dict) -> inventory_models.InstalledProfile:
return inventory_models.InstalledProfile(**data)
class ProfileListResponse(CommandResponse):
ProfileList = fields.Nested(ProfileListItem, many=True)
|
mosen/commandment
|
commandment/mdm/response_schema.py
|
Python
|
mit
| 10,132 | 0.00227 |
# -*- coding: utf-8 -*-
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests import common
from openerp.tools import SUPERUSER_ID
class TestPurchaseRequestToRequisition(common.TransactionCase):
def setUp(self):
super(TestPurchaseRequestToRequisition, self).setUp()
self.purchase_request = self.env['purchase.request']
self.purchase_request_line = self.env['purchase.request.line']
self.wiz =\
self.env['purchase.request.line.make.purchase.requisition']
self.purchase_requisition_partner_model =\
self.env['purchase.requisition.partner']
self.purchase_order = self.env['purchase.order']
def test_purchase_request_to_purchase_requisition(self):
vals = {
'picking_type_id': self.env.ref('stock.picking_type_in').id,
'requested_by': SUPERUSER_ID,
}
purchase_request = self.purchase_request.create(vals)
vals = {
'request_id': purchase_request.id,
'product_id': self.env.ref('product.product_product_13').id,
'product_uom_id': self.env.ref('product.product_uom_unit').id,
'product_qty': 5.0,
}
purchase_request_line = self.purchase_request_line.create(vals)
wiz_id = self.wiz.with_context(
active_model="purchase.request.line",
active_ids=[purchase_request_line.id],
active_id=purchase_request_line.id,).create({})
wiz_id.make_purchase_requisition()
self.assertTrue(
len(purchase_request_line.requisition_lines.ids) == 1,
'Should have one purchase requisition line created')
requisition_id = purchase_request_line.requisition_lines.requisition_id
self.assertEquals(
len(purchase_request.line_ids),
len(requisition_id.line_ids), 'Should have the same lines')
requisition_line = requisition_id.line_ids
self.assertEquals(
requisition_line.product_id.id,
purchase_request_line.product_id.id,
'Should have the same products')
self.assertEquals(
purchase_request.state,
requisition_id.state,
'Should have the same state')
requisition_id.tender_in_progress()
requisition_id.tender_open()
vals = {
'partner_id': self.env.ref('base.res_partner_12').id,
}
requisition_partner_id =\
self.purchase_requisition_partner_model.with_context(
active_model='purchase.requisition',
active_ids=[requisition_id.id],
active_id=requisition_id.id,).create(vals)
requisition_partner_id.create_order()
domain = [
('requisition_id', '=', requisition_id.id),
]
purchase_id = self.purchase_order.search(domain)
self.assertTrue(purchase_id, 'Should find purchase order')
purchase_id.signal_workflow('purchase_confirm')
self.assertEquals(
len(
purchase_id.order_line.purchase_request_lines
), 1, 'Should have a link between order lines and request lines')
|
andrius-preimantas/purchase-workflow
|
purchase_request_to_requisition/tests/test_purchase_request_to_requisition.py
|
Python
|
agpl-3.0
| 3,251 | 0 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# File Name: fact.py
# Author: Lipsum
# Mail: niuleipeng@gmail.com
# Created Time: 2016-05-11 17:27:38
# def fact(n):
# if n == 1:
# return 1
# return fact(n-1) * n
def fact(n):
return fact_iter(n, 1)
def fact_iter(num, product):
if num == 1:
return product
return fact_iter(num - 1, num * product)
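# Hedged trace (editorial addition): fact(4) accumulates in the second argument,
# fact_iter(4, 1) -> fact_iter(3, 4) -> fact_iter(2, 12) -> fact_iter(1, 24) -> 24.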
num = int(input('input a number plz:'))
print(fact(num))
|
saturnast/python-learning
|
tempCodeRunnerFile.py
|
Python
|
mit
| 441 | 0.006803 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
from weboob.browser import LoginBrowser, need_login, StatesMixin
from weboob.browser.url import URL
from weboob.browser.exceptions import ClientError
from weboob.exceptions import BrowserIncorrectPassword, BrowserUnavailable
from weboob.capabilities.base import find_object
from weboob.capabilities.bank import Account
from weboob.tools.capabilities.bank.transactions import sorted_transactions, FrenchTransaction
from .pages import (
ErrorPage,
LoginPage, CenetLoginPage, CenetHomePage,
CenetAccountsPage, CenetAccountHistoryPage, CenetCardsPage,
CenetCardSummaryPage, SubscriptionPage, DownloadDocumentPage,
CenetLoanPage,
)
from ..pages import CaissedepargneKeyboard
__all__ = ['CenetBrowser']
class CenetBrowser(LoginBrowser, StatesMixin):
BASEURL = "https://www.cenet.caisse-epargne.fr"
STATE_DURATION = 5
login = URL(
r'https://(?P<domain>[^/]+)/authentification/manage\?step=identification&identifiant=(?P<login>.*)',
r'https://.*/authentification/manage\?step=identification&identifiant=.*',
r'https://.*/login.aspx',
LoginPage,
)
account_login = URL(r'https://(?P<domain>[^/]+)/authentification/manage\?step=account&identifiant=(?P<login>.*)&account=(?P<accountType>.*)', LoginPage)
cenet_vk = URL(r'https://www.cenet.caisse-epargne.fr/Web/Api/ApiAuthentification.asmx/ChargerClavierVirtuel')
cenet_home = URL(r'/Default.aspx$', CenetHomePage)
cenet_accounts = URL(r'/Web/Api/ApiComptes.asmx/ChargerSyntheseComptes', CenetAccountsPage)
cenet_loans = URL(r'/Web/Api/ApiFinancements.asmx/ChargerListeFinancementsMLT', CenetLoanPage)
cenet_account_history = URL(r'/Web/Api/ApiComptes.asmx/ChargerHistoriqueCompte', CenetAccountHistoryPage)
cenet_account_coming = URL(r'/Web/Api/ApiCartesBanquaires.asmx/ChargerEnCoursCarte', CenetAccountHistoryPage)
cenet_tr_detail = URL(r'/Web/Api/ApiComptes.asmx/ChargerDetailOperation', CenetCardSummaryPage)
cenet_cards = URL(r'/Web/Api/ApiCartesBanquaires.asmx/ChargerCartes', CenetCardsPage)
error = URL(
r'https://.*/login.aspx',
r'https://.*/Pages/logout.aspx.*',
r'https://.*/particuliers/Page_erreur_technique.aspx.*',
ErrorPage,
)
cenet_login = URL(
r'https://.*/$',
r'https://.*/default.aspx',
CenetLoginPage,
)
subscription = URL(r'/Web/Api/ApiReleves.asmx/ChargerListeEtablissements', SubscriptionPage)
documents = URL(r'/Web/Api/ApiReleves.asmx/ChargerListeReleves', SubscriptionPage)
download = URL(r'/Default.aspx\?dashboard=ComptesReleves&lien=SuiviReleves', DownloadDocumentPage)
__states__ = ('BASEURL',)
def __init__(self, nuser, *args, **kwargs):
# The URL to log in and to navigate are different
self.login_domain = kwargs.pop('domain', self.BASEURL)
if not self.BASEURL.startswith('https://'):
self.BASEURL = 'https://%s' % self.BASEURL
self.accounts = None
self.nuser = nuser
super(CenetBrowser, self).__init__(*args, **kwargs)
def do_login(self):
data = self.login.go(login=self.username, domain=self.login_domain).get_response()
if len(data['account']) > 1:
# additional request where there is more than one
# connection type (called typeAccount)
# TODO: test all connection type values if needed
account_type = data['account'][0]
self.account_login.go(login=self.username, accountType=account_type, domain=self.login_domain)
data = self.page.get_response()
if data is None:
raise BrowserIncorrectPassword()
elif not self.nuser:
raise BrowserIncorrectPassword("Erreur: Numéro d'utilisateur requis.")
if "authMode" in data and data['authMode'] != 'redirect':
raise BrowserIncorrectPassword()
payload = {'contexte': '', 'dataEntree': None, 'donneesEntree': "{}", 'filtreEntree': "\"false\""}
res = self.cenet_vk.open(data=json.dumps(payload), headers={'Content-Type': "application/json"})
content = json.loads(res.text)
d = json.loads(content['d'])
end = json.loads(d['DonneesSortie'])
_id = end['Identifiant']
vk = CaissedepargneKeyboard(end['Image'], end['NumerosEncodes'])
code = vk.get_string_code(self.password)
post_data = {
'CodeEtablissement': data['codeCaisse'],
'NumeroBad': self.username,
'NumeroUtilisateur': self.nuser
}
self.location(data['url'], data=post_data, headers={'Referer': 'https://www.cenet.caisse-epargne.fr/'})
return self.page.login(self.username, self.password, self.nuser, data['codeCaisse'], _id, code)
@need_login
def get_accounts_list(self):
if self.accounts is None:
data = {
'contexte': '',
'dateEntree': None,
'donneesEntree': 'null',
'filtreEntree': None
}
try:
self.accounts = [account for account in self.cenet_accounts.go(json=data).get_accounts()]
except ClientError:
# Unauthorized due to wrongpass
raise BrowserIncorrectPassword()
self.cenet_loans.go(json=data)
for account in self.page.get_accounts():
self.accounts.append(account)
for account in self.accounts:
try:
account._cards = []
self.cenet_cards.go(json=data)
for card in self.page.get_cards():
if card['Compte']['Numero'] == account.id:
account._cards.append(card)
except BrowserUnavailable:
# for some accounts, the site can throw us an error, during weeks
self.logger.warning('ignoring cards because site is unavailable...')
account._cards = []
return iter(self.accounts)
def get_loans_list(self):
return []
@need_login
def get_history(self, account):
if account.type == Account.TYPE_LOAN:
return []
headers = {
'Content-Type': 'application/json; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01'
}
data = {
'contexte': '',
'dateEntree': None,
'filtreEntree': None,
'donneesEntree': json.dumps(account._formated),
}
items = []
self.cenet_account_history.go(data=json.dumps(data), headers=headers)
# there might be some duplicate transactions regarding the card type ones
# because some requests lead to the same transaction list
# even with different parameters/data in the request
card_tr_list = []
while True:
data_out = self.page.doc['DonneesSortie']
for tr in self.page.get_history():
items.append(tr)
if tr.type is FrenchTransaction.TYPE_CARD_SUMMARY:
if find_object(card_tr_list, label=tr.label, amount=tr.amount, raw=tr.raw, date=tr.date, rdate=tr.rdate):
self.logger.warning('Duplicated transaction: %s', tr)
items.pop()
continue
card_tr_list.append(tr)
tr.deleted = True
tr_dict = [tr_dict2 for tr_dict2 in data_out if tr_dict2['Libelle'] == tr.label]
donneesEntree = {}
donneesEntree['Compte'] = account._formated
donneesEntree['ListeOperations'] = [tr_dict[0]]
deferred_data = {
'contexte': '',
'dateEntree': None,
'donneesEntree': json.dumps(donneesEntree).replace('/', '\\/'),
'filtreEntree': json.dumps(tr_dict[0]).replace('/', '\\/')
}
tr_detail_page = self.cenet_tr_detail.open(data=json.dumps(deferred_data), headers=headers)
for tr in tr_detail_page.get_history():
items.append(tr)
offset = self.page.next_offset()
if not offset:
break
data['filtreEntree'] = json.dumps({
'Offset': offset,
})
self.cenet_account_history.go(data=json.dumps(data), headers=headers)
return sorted_transactions(items)
@need_login
def get_coming(self, account):
if account.type == Account.TYPE_LOAN:
return []
trs = []
headers = {
'Content-Type': 'application/json; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01'
}
for card in account._cards:
if card['CumulEnCours']['Montant']['Valeur'] != 0:
data = {
'contexte': '',
'dateEntree': None,
'donneesEntree': json.dumps(card),
'filtreEntree': None
}
for tr in self.cenet_account_coming.go(data=json.dumps(data), headers=headers).get_history():
trs.append(tr)
return sorted_transactions(trs)
@need_login
def get_investment(self, account):
# not available for the moment
return []
@need_login
def get_advisor(self):
return [self.cenet_home.stay_or_go().get_advisor()]
@need_login
def get_profile(self):
return self.cenet_home.stay_or_go().get_profile()
def iter_recipients(self, origin_account):
raise NotImplementedError()
def init_transfer(self, account, recipient, transfer):
raise NotImplementedError()
def new_recipient(self, recipient, **params):
raise NotImplementedError()
@need_login
def iter_subscription(self):
subscriber = self.get_profile().name
json_data = {
'contexte': '',
'dateEntree': None,
'donneesEntree': 'null',
'filtreEntree': None
}
self.subscription.go(json=json_data)
return self.page.iter_subscription(subscriber=subscriber)
@need_login
def iter_documents(self, subscription):
sub_id = subscription.id
input_filter = {
'Page': 0,
'NombreParPage': 0,
'Tris': [],
'Criteres': [
{'Champ': 'Etablissement', 'TypeCritere': 'Equals', 'Value': sub_id},
{'Champ': 'DateDebut', 'TypeCritere': 'Equals', 'Value': None},
{'Champ': 'DateFin', 'TypeCritere': 'Equals', 'Value': None},
{'Champ': 'MaxRelevesAffichesParNumero', 'TypeCritere': 'Equals', 'Value': '100'},
],
}
json_data = {
'contexte': '',
'dateEntree': None,
'donneesEntree': 'null',
'filtreEntree': json.dumps(input_filter)
}
self.documents.go(json=json_data)
return self.page.iter_documents(sub_id=sub_id, sub_label=subscription.label, username=self.username)
@need_login
def download_document(self, document):
self.download.go()
return self.page.download_form(document).content
|
laurentb/weboob
|
modules/caissedepargne/cenet/browser.py
|
Python
|
lgpl-3.0
| 12,250 | 0.002857 |
import networkx as nx
#from networkx.generators.smax import li_smax_graph
def s_metric(G, normalized=True):
"""Return the s-metric of graph.
The s-metric is defined as the sum of the products deg(u)*deg(v)
for every edge (u,v) in G. If norm is provided construct the
s-max graph and compute it's s_metric, and return the normalized
s value
Parameters
----------
G : graph
The graph used to compute the s-metric.
normalized : bool (optional)
Normalize the value.
Returns
-------
s : float
The s-metric of the graph.
References
----------
.. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger,
Towards a Theory of Scale-Free Graphs:
Definition, Properties, and Implications (Extended Version), 2005.
https://arxiv.org/abs/cond-mat/0501169
"""
if normalized:
raise nx.NetworkXError("Normalization not implemented")
# Gmax = li_smax_graph(list(G.degree().values()))
# return s_metric(G,normalized=False)/s_metric(Gmax,normalized=False)
# else:
return float(sum([G.degree(u) * G.degree(v) for (u, v) in G.edges()]))
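# Hedged usage sketch (editorial addition): on the 3-node path graph the degrees are
# 1, 2 and 1, so the unnormalized s-metric is 1*2 + 2*1 = 4.0. Note that `normalized`
# defaults to True but is not implemented, so it must be passed explicitly.
if __name__ == "__main__":
    G = nx.path_graph(3)
    print(s_metric(G, normalized=False))  # 4.0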
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/smetric.py
|
Python
|
gpl-3.0
| 1,194 | 0.000838 |
import pandas as pd
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
from rpy2.robjects import Formula
from rpy2 import robjects as ro
from survey_stats.helpr import svyciprop_xlogit, svybyci_xlogit, factor_summary
from survey_stats.helpr import filter_survey_var, rm_nan_survey_var, svyby_nodrop
from survey_stats.helpr import fix_lonely_psus
from survey_stats import pdutil as u
from survey_stats.const import DECIMALS
from survey_stats import log
import gc
rbase = importr('base')
rstats = importr('stats')
rsvy = importr('survey')
rfeather = importr('feather', on_conflict='warn')
logger = log.getLogger()
def dim_design(d):
return pandas2ri.ri2py(rbase.dim(d[d.names.index('variables')]))
def subset_survey(des, filt, qn=None):
# filt is a dict with vars as keys and list of acceptable values as levels
# example from R:
# subset(dclus1, sch.wide=="Yes" & comp.imp=="Yes"
if not len(filt.keys()) > 0:
# empty filter, return original design object
return des
filtered = rbase.Reduce(
"&",
[filter_survey_var(des, k, v) for k, v in filt.items()] +
([rm_nan_survey_var(des, qn)] if qn else [])
)
return rsvy.subset_survey_design(des, filtered)
def fetch_stats_by(des, qn_f, r, vs):
lvl_f = '~%s' % '+'.join(vs)
ct_f = '%s + %s' % (lvl_f, qn_f[1:])
logger.info('gen stats for interaction level', lvl_f=lvl_f, qn_f=qn_f, ct_f=ct_f, r=r)
cols = vs + ['mean', 'se', 'ci_l', 'ci_u']
df = svybyci_xlogit(Formula(qn_f), Formula(lvl_f), des, svyciprop_xlogit, vartype=['se', 'ci'])
df = pandas2ri.ri2py(df)
df.columns = cols
df = df.set_index(vs)
cts = svyby_nodrop(Formula(lvl_f), Formula(ct_f), des, rsvy.unwtd_count, keep_var=True)
cts = pandas2ri.ri2py(cts).fillna(0.0)
cts.columns = vs + ['eql', 'ct', 'se_ignore']
cts = cts.set_index(vs)
cts['eql'] = cts.eql.apply(lambda x: x == 'TRUE' if type(x) == str else x > 0)
counts = cts.ct[cts.eql == True].tolist()
ssizes = cts.groupby(vs).sum()['ct']
df = df.assign(count=counts, sample_size=ssizes)
if df.shape[0] > 0:
df['response'] = r
df['level'] = len(vs)
rdf = u.fill_none(df.round(DECIMALS)).reset_index()
logger.info('create svyby df', df=rdf, vars=vs, eq=cts)
return rdf
def fetch_stats_totals(des, qn_f, r):
total_ci = svyciprop_xlogit(Formula(qn_f), des, multicore=False)
# extract stats
logger.info('fetching stats totals', r=r, q=qn_f)
cts = rsvy.svyby(Formula(qn_f), Formula(qn_f), des,
rsvy.unwtd_count, na_rm=True,
na_rm_by=True, na_rm_all=True, multicore=False)
cts = pandas2ri.ri2py(cts)
cols = ['eql', 'ct', 'se_ignore']
cts.columns = cols
ct = cts.ct[cts.eql == 1].sum()
ss = cts.ct.sum()
res = {'level': 0,
'response': r,
'mean': u.guard_nan(
rbase.as_numeric(total_ci)[0]) if total_ci else None,
'se': u.guard_nan(
rsvy.SE(total_ci)[0]) if total_ci else None,
'ci_l': u.guard_nan(
rbase.attr(total_ci, 'ci')[0]) if total_ci else None,
'ci_u': u.guard_nan(
rbase.attr(total_ci, 'ci')[1]) if total_ci else None,
'count': ct,
'sample_size': ss
}
# round as appropriate
logger.info('finished computation lvl1', res=res,
total_ci=total_ci, ct=ct, ss=ss)
res = pd.DataFrame([res]).round(DECIMALS)
return u.fill_none(res)
def fetch_stats(des, qn, r, vs=[], filt={}):
# ex: ~qn8
rbase.gc()
gc.collect()
qn_f = '~I(%s=="%s")' % (qn, r)
logger.info('subsetting des with filter', filt=filt)
des = subset_survey(des, filt)
logger.info('done subsetting')
dfs = [fetch_stats_totals(des, qn_f, r)]
levels = [vs[:k+1] for k in range(len(vs))]
    sts = [fetch_stats_by(des, qn_f, r, lvl) for lvl in levels]  # a real list, so it concatenates with dfs below
dfz = pd.concat(dfs + sts, ignore_index=True)
# get stats_by_fnats for each level of interactions in vars
# using svyby to compute across combinations of loadings
logger.info('finished computations, appending dfs', dfs=dfz)
return u.fill_none(dfz) # .round(DECIMALS)
def subset(d, filter):
return d._replace(des=subset_survey(d, filter))
def des_from_feather(fthr_file, denovo=False, fpc=False, design='cluster'):
rbase.gc()
gc.collect()
if fpc and design=='cluster':
fix_lonely_psus()
rdf = rfeather.read_feather(fthr_file)
logger.info('creating survey design from data and annotations',
cols=list(rbase.colnames(rdf)))
strata = '~strata'
if denovo:
strata = '~year+strata'
res = rsvy.svydesign(
id=(Formula('~psu') if design == 'cluster' else Formula('~1')),
weight=Formula('~weight'),
strata=Formula(strata), data=rdf, nest=True,
fpc=(Formula('~fpc') if fpc else ro.NULL))
rbase.gc()
gc.collect()
return res
def des_from_survey_db(tbl, db, host, port, denovo=False, fpc=False,design='cluster'):
strata = '~strata'
if denovo:
strata = '~yr+sitecode'
return rsvy.svydesign(id=Formula('~psu'), weight=Formula('~weight'),
strata=Formula(strata), nest=True,
fpc=(Formula('~fpc') if fpc else ro.NULL),
data=tbl, dbname=db, host=host, port=port,
dbtype='MonetDB.R')
|
semanticbits/survey_stats
|
src/survey_stats/survey.py
|
Python
|
bsd-2-clause
| 5,498 | 0.002001 |
#!/usr/bin/python
'''
Cronostamper test suite:
Simple trigger simulator. Opens a socket, runs
./oneShot when a client connects, and exits.
"oneShot" activates GPIO 7 a single time.
Nacho Mas January-2017
'''
import socket
import commands
import sys
import time
import datetime
from thread import *
HOST = '' # Symbolic name meaning all available interfaces
PORT = 7777 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Starting CronoStamper Sockets Server.'
print 'Socket created'
#Bind socket to local host and port
try:
s.bind((HOST, PORT))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
#Start listening on socket
s.listen(10)
print 'Socket now listening'
def clientthread(conn):
rst=commands.getoutput('./oneShot')
d = str(datetime.datetime.fromtimestamp(float(rst)))
conn.sendall(d+'\r\n')
print d
conn.close()
#now keep talking with the client
while 1:
#wait to accept a connection - blocking call
conn, addr = s.accept()
print 'Connected with ' + addr[0] + ':' + str(addr[1])
#start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.
start_new_thread(clientthread ,(conn,))
s.close()
|
nachoplus/cronoStamper
|
tools/cameraSimulator.py
|
Python
|
gpl-2.0
| 1,376 | 0.021076 |
import threading
from mock import patch
from uuid import uuid4
from changes_lxc_wrapper.cli.wrapper import WrapperCommand
def generate_jobstep_data():
# this must generic a *valid* dataset that should result in a full
# run
return {
'status': {'id': 'queued'},
'data': {},
'expectedSnapshot': None,
'snapshot': {
'id': 'a1028849e8cf4ff0a7d7fdfe3c4fe925',
},
}
def setup_function(function):
assert threading.activeCount() == 1
def teardown_function(function):
assert threading.activeCount() == 1
@patch.object(WrapperCommand, 'run_build_script')
def test_local_run(mock_run):
command = WrapperCommand([
'--', 'echo 1',
])
command.run()
mock_run.assert_called_once_with(
release='precise',
post_launch=None,
snapshot=None,
save_snapshot=False,
s3_bucket=None,
pre_launch=None,
validate=True,
user='ubuntu',
cmd=['echo 1'],
script=None,
flush_cache=False,
clean=False,
keep=False,
)
@patch('changes_lxc_wrapper.cli.wrapper.ChangesApi')
@patch.object(WrapperCommand, 'run_build_script')
def test_remote_run(mock_run, mock_api_cls):
jobstep_id = uuid4()
jobstep_data = generate_jobstep_data()
mock_api = mock_api_cls.return_value
mock_api.get_jobstep.return_value = jobstep_data
command = WrapperCommand([
'--jobstep-id', jobstep_id.hex,
'--api-url', 'http://changes.example.com',
])
command.run()
mock_run.assert_called_once_with(
release='precise',
post_launch=None,
snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925',
save_snapshot=False,
s3_bucket=None,
pre_launch=None,
validate=True,
user='ubuntu',
cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex],
flush_cache=False,
clean=False,
keep=False,
)
@patch('changes_lxc_wrapper.cli.wrapper.ChangesApi')
@patch.object(WrapperCommand, 'run_build_script')
def test_already_finished_job(mock_run, mock_api_cls):
jobstep_id = uuid4()
jobstep_data = generate_jobstep_data()
jobstep_data['status']['id'] = 'finished'
mock_api = mock_api_cls.return_value
mock_api.get_jobstep.return_value = jobstep_data
command = WrapperCommand([
'--jobstep-id', jobstep_id.hex,
'--api-url', 'http://changes.example.com',
])
command.run()
assert not mock_run.called
@patch('changes_lxc_wrapper.cli.wrapper.ChangesApi')
@patch.object(WrapperCommand, 'run_build_script')
def test_non_default_release(mock_run, mock_api_cls):
jobstep_id = uuid4()
jobstep_data = generate_jobstep_data()
jobstep_data['data']['release'] = 'fakerelease'
mock_api = mock_api_cls.return_value
mock_api.get_jobstep.return_value = jobstep_data
command = WrapperCommand([
'--jobstep-id', jobstep_id.hex,
'--api-url', 'http://changes.example.com',
])
command.run()
mock_run.assert_called_once_with(
release='fakerelease',
post_launch=None,
snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925',
save_snapshot=False,
s3_bucket=None,
pre_launch=None,
validate=True,
user='ubuntu',
cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex],
flush_cache=False,
clean=False,
keep=False,
)
|
dropbox/changes-lxc-wrapper
|
tests/cli/test_wrapper.py
|
Python
|
apache-2.0
| 3,544 | 0.000564 |
import collections
import copy
from typing import Dict, Mapping, Optional, Set
import fontTools.misc.py23
import fontTools.ttLib
import fontTools.ttLib.tables.otTables as otTables
import statmake.classes
def apply_stylespace_to_variable_font(
stylespace: statmake.classes.Stylespace,
varfont: fontTools.ttLib.TTFont,
additional_locations: Mapping[str, float],
):
"""Generate and apply a STAT table to a variable font.
additional_locations: used in subset Designspaces to express where on which other
axes not defined by an <axis> element the varfont stands. The primary use-case is
defining a complete STAT table for variable fonts that do not include all axes of a
family (either because they intentionally contain just a subset of axes or because
the designs are incompatible).
"""
name_table, stat_table = generate_name_and_STAT_variable(
stylespace, varfont, additional_locations
)
varfont["name"] = name_table
varfont["STAT"] = stat_table
def generate_name_and_STAT_variable(
stylespace: statmake.classes.Stylespace,
varfont: fontTools.ttLib.TTFont,
additional_locations: Mapping[str, float],
):
"""Generate a new name and STAT table ready for insertion."""
if "fvar" not in varfont:
raise ValueError(
"Need a variable font with the fvar table to determine which instances "
"are present."
)
stylespace_name_to_axis = {a.name.default: a for a in stylespace.axes}
fvar_name_to_axis = {}
name_to_tag: Dict[str, str] = {}
name_to_index: Dict[str, int] = {}
index = 0
for index, fvar_axis in enumerate(varfont["fvar"].axes):
fvar_axis_name = _default_name_string(varfont, fvar_axis.axisNameID)
try:
stylespace_axis = stylespace_name_to_axis[fvar_axis_name]
except KeyError:
raise ValueError(
f"No stylespace entry found for axis name '{fvar_axis_name}'."
)
if fvar_axis.axisTag != stylespace_axis.tag:
raise ValueError(
f"fvar axis '{fvar_axis_name}' tag is '{fvar_axis.axisTag}', but "
f"Stylespace tag is '{stylespace_axis.tag}'."
)
fvar_name_to_axis[fvar_axis_name] = fvar_axis
name_to_tag[fvar_axis_name] = fvar_axis.axisTag
name_to_index[fvar_axis_name] = index
for axis_name in additional_locations:
try:
stylespace_axis = stylespace_name_to_axis[axis_name]
except KeyError:
raise ValueError(f"No stylespace entry found for axis name '{axis_name}'.")
name_to_tag[stylespace_axis.name.default] = stylespace_axis.tag
index += 1
name_to_index[stylespace_axis.name.default] = index
# First, determine which stops are used on which axes. The STAT table must contain
# a name for each stop that is used on each axis, so each stop must have an entry
# in the Stylespace. Also include locations in additional_locations that can refer
# to axes not present in the current varfont.
stylespace_stops: Dict[str, Set[float]] = {}
for axis in stylespace.axes:
stylespace_stops[axis.tag] = {l.value for l in axis.locations}
for named_location in stylespace.locations:
for name, value in named_location.axis_values.items():
stylespace_stops[name_to_tag[name]].add(value)
axis_stops: Mapping[str, Set[float]] = collections.defaultdict(set) # tag to stops
for instance in varfont["fvar"].instances:
for k, v in instance.coordinates.items():
if v not in stylespace_stops[k]:
raise ValueError(
f"There is no Stylespace entry for stop {v} on axis {k}."
)
axis_stops[k].add(v)
for k, v in additional_locations.items():
axis_tag = name_to_tag[k]
if v not in stylespace_stops[axis_tag]:
raise ValueError(
f"There is no Stylespace entry for stop {v} on axis {k} (from "
"additional locations)."
)
axis_stops[axis_tag].add(v)
# Construct temporary name and STAT tables for returning at the end.
name_table = copy.deepcopy(varfont["name"])
stat_table = _new_empty_STAT_table()
# Generate axis records. Reuse an axis' name ID if it exists, else make a new one.
for axis_name, axis_tag in name_to_tag.items():
stylespace_axis = stylespace_name_to_axis[axis_name]
if axis_name in fvar_name_to_axis:
axis_name_id = fvar_name_to_axis[axis_name].axisNameID
else:
axis_name_id = name_table.addMultilingualName(
stylespace_axis.name.mapping, mac=False
)
axis_record = _new_axis_record(
tag=axis_tag, name_id=axis_name_id, ordering=stylespace_axis.ordering
)
stat_table.table.DesignAxisRecord.Axis.append(axis_record)
# Generate formats 1, 2 and 3.
for axis in stylespace.axes:
for location in axis.locations:
if location.value not in axis_stops[axis.tag]:
continue
axis_value = otTables.AxisValue()
name_id = name_table.addMultilingualName(location.name.mapping, mac=False)
location.fill_in_AxisValue(
axis_value, axis_index=name_to_index[axis.name.default], name_id=name_id
)
stat_table.table.AxisValueArray.AxisValue.append(axis_value)
# Generate format 4.
for named_location in stylespace.locations:
if all(
name_to_tag[k] in axis_stops and v in axis_stops[name_to_tag[k]]
for k, v in named_location.axis_values.items()
):
stat_table.table.Version = 0x00010002
axis_value = otTables.AxisValue()
name_id = name_table.addMultilingualName(
named_location.name.mapping, mac=False
)
named_location.fill_in_AxisValue(
axis_value,
axis_name_to_index=name_to_index,
name_id=name_id,
axis_value_record_type=otTables.AxisValueRecord,
)
stat_table.table.AxisValueArray.AxisValue.append(axis_value)
stat_table.table.ElidedFallbackNameID = stylespace.elided_fallback_name_id
return name_table, stat_table
def _default_name_string(otfont: fontTools.ttLib.TTFont, name_id: int) -> str:
"""Return first name table match for name_id for language 'en'."""
name = otfont["name"].getName(name_id, 3, 1, 0x0409).toUnicode()
if name is not None:
return name
raise ValueError(f"No default Windows record for id {name_id}.")
def _new_empty_STAT_table():
stat_table = fontTools.ttLib.newTable("STAT")
stat_table.table = otTables.STAT()
stat_table.table.Version = 0x00010001
stat_table.table.DesignAxisRecord = otTables.AxisRecordArray()
stat_table.table.DesignAxisRecord.Axis = []
stat_table.table.AxisValueArray = otTables.AxisValueArray()
stat_table.table.AxisValueArray.AxisValue = []
return stat_table
def _new_axis_record(tag: str, name_id: int, ordering: Optional[int]):
if ordering is None:
raise ValueError("ordering must be an integer.")
axis_record = otTables.AxisRecord()
axis_record.AxisTag = fontTools.misc.py23.Tag(tag)
axis_record.AxisNameID = name_id
axis_record.AxisOrdering = ordering
return axis_record
|
googlefonts/statmake
|
statmake/lib.py
|
Python
|
mit
| 7,470 | 0.002142 |
from . import strip
class Segment(strip.Strip):
"""Represents an offset, length segment within a strip."""
def __init__(self, strip, length, offset=0):
if offset < 0 or length < 0:
raise ValueError('Segment indices are non-negative.')
if offset + length > len(strip):
raise ValueError('Segment too long.')
self.strip = strip
self.offset = offset
self.length = length
def __getitem__(self, index):
return self.strip[self._fix_index(index)]
def __setitem__(self, index, value):
self.strip[self._fix_index(index)] = value
def __len__(self):
return self.length
def next(self, length):
"""Return a new segment starting right after self in the same buffer."""
return Segment(self.strip, length, self.offset + self.length)
def _fix_index(self, index):
if isinstance(index, slice):
raise ValueError('Slicing segments not implemented.')
if index < 0:
index += self.length
if index >= 0 and index < self.length:
return self.offset + index
raise IndexError('Index out of range')
def make_segments(strip, length):
"""Return a list of Segments that evenly split the strip."""
if len(strip) % length:
raise ValueError('The length of strip must be a multiple of length')
s = []
try:
while True:
s.append(s[-1].next(length) if s else Segment(strip, length))
except ValueError:
return s
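# Hedged illustration (editorial addition): for a strip of length 12,
# make_segments(strip, 4) returns three Segments of length 4 at offsets 0, 4 and 8;
# the loop stops when Segment.next() would run past the end of the strip (the
# resulting ValueError is caught above).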
|
rec/BiblioPixel
|
bibliopixel/layout/geometry/segment.py
|
Python
|
mit
| 1,539 | 0.00065 |
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^sql/$', 'sqlparser.views.parse_sql'),
)
|
slack-sqlbot/slack-sqlbot
|
slack_sqlbot/urls.py
|
Python
|
mit
| 164 | 0 |
import urllib2
import json
import sys
import os
import wunderData
def get_coord(exifdict):
'''
Purpose: The purpose of this script is to extract the Latitude and Longitude from the EXIF data
Inputs: exifdict: structure storing the image's EXIF data.
Outputs: coords: A tuple of the Latitude and Longitude in Decimal form
Returns: (lat,lon)
Assumptions: The EXIF data is valid.
'''
values = exifdict['gps gpslatitude'][1:-1].split(", ")
s = values[2]
df = float(values[0])
mf = float(values[1])
smath = s.split("/")
sf = float(smath[0])/float(smath[1])
lat = df + mf/60 + sf/3600
if exifdict['gps gpslatituderef'] == 'S':
lat = lat*(-1)
values = exifdict['gps gpslongitude'][1:-1].split(", ")
s = values[2]
df = float(values[0])
mf = float(values[1])
smath = s.split("/")
sf = float(smath[0])/float(smath[1])
lon = df + mf/60 + sf/3600
if exifdict['gps gpslongituderef'] == 'W':
lon = lon*(-1)
return (lat,lon)
def coord_to_zip(coord,googlegeokey):
'''
Purpose: The purpose of this script is to convert Latitude and Longitude to a ZIP Code
Inputs: coord: tuple holding latitude and longitude, googlegeokey: The Google geocoding API
Outputs: string of 5 digit long ZIP code.
Returns: zipcode
Assumptions: The EXIF data is valid.
'''
try:
url = "https://maps.googleapis.com/maps/api/geocode/json?latlng="+str(coord[0])+","+str(coord[1])+"&key="+googlegeokey
c = urllib2.urlopen(url)
response = c.read()
parsedResults = json.loads(response)
zipcode = parsedResults['results'][0]['address_components'][-1]['long_name']
except Exception:
print "Unable to retrieve data: ", sys.exc_info()[0]
zipcode = "99999"
finally:
return zipcode
def zip_to_coord(zipcode,googlegeokey):
'''
Purpose: The purpose of this script is to convert ZIP Code to a Latitude and Longitude
Inputs: zipcode: 5 digit long ZIP code.
Outputs: coord: tuple holding latitude and longitude
Returns: (lat,lon)
Assumptions: The EXIF data is valid.
'''
try:
url = 'https://maps.googleapis.com/maps/api/geocode/json?address='+zipcode+'&key='+googlegeokey
c = urllib2.urlopen(url)
results = c.read()
parsedResults = json.loads(results)
lat = float(parsedResults['results'][0]['geometry']['location']['lat'])
lon = float(parsedResults['results'][0]['geometry']['location']['lng'])
except Exception:
print "Unable to retrieve data: ", sys.exc_info()[0]
(lat,lon) = (0.0,0.0)
finally:
return (lat,lon)
def sun_position(exifdict):
'''
    Purpose: Identify whether an image was taken during sunrise, sunset, night, or day.
Inputs: exifdict: structure storing the image's EXIF data.
Outputs: string
Returns: sunrise,sunset,night,day
Assumptions: N/A
'''
coord = get_coord(exifdict)
wData = wunderData.get_data(str(coord[0])+","+str(coord[1]))
sunriseTime = wData['sunrise'].split(':')
sunsetTime = wData['sunset'].split(':')
sunriseTarget = (int(sunriseTime[0])*60)+int(sunriseTime[1])
sunsetTarget = (int(sunsetTime[0])*60)+int(sunsetTime[1])
hoursTime = (str(exifdict['exif datetimeoriginal']).split(' '))[1].split(':')
pictureTime = (int(hoursTime[0])*60)+int(hoursTime[1])+int(float(hoursTime[2])/60)
if ((pictureTime >= (sunriseTarget - 15)) & (pictureTime <= (sunriseTarget + 30))):
return 'sunrise'
elif ((pictureTime >= (sunsetTarget - 15)) & (pictureTime <= (sunsetTarget + 30))):
return 'sunset'
elif ((pictureTime > (sunsetTarget + 15))|(pictureTime < (sunriseTarget - 15))):
return 'night'
else:
return 'day'
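# Editor's note: a hedged worked example, not part of the original module, of
# the minutes-since-midnight comparison used by sun_position(); the times below
# are hypothetical.
def _example_sunrise_window():
    sunriseTarget = 6*60 + 45             # 06:45 -> 405 minutes
    pictureTime = 7*60                    # 07:00 -> 420 minutes
    # 420 falls inside [405 - 15, 405 + 30], so sun_position() would say 'sunrise'.
    return (pictureTime >= (sunriseTarget - 15)) & (pictureTime <= (sunriseTarget + 30))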
|
Aerolyzer/Aerolyzer
|
aerolyzer/location.py
|
Python
|
apache-2.0
| 3,932 | 0.007121 |
# -*- coding: utf-8 -*-
import contextlib
import logging
import os
import os.path
import yaml
from bravado_core.spec import is_yaml
from six.moves import urllib
from six.moves.urllib import parse as urlparse
from bravado.compat import json
from bravado.requests_client import RequestsClient
log = logging.getLogger(__name__)
def is_file_scheme_uri(url):
return urlparse.urlparse(url).scheme == u'file'
class FileEventual(object):
"""Adaptor which supports the :class:`crochet.EventualResult`
interface for retrieving api docs from a local file.
"""
class FileResponse(object):
def __init__(self, data):
self.text = data
self.headers = {}
def json(self):
return json.loads(self.text)
def __init__(self, path):
self.path = path
self.is_yaml = is_yaml(path)
def get_path(self):
if not self.path.endswith('.json') and not self.is_yaml:
return self.path + '.json'
return self.path
def wait(self, timeout=None):
with contextlib.closing(urllib.request.urlopen(self.get_path())) as fp:
content = fp.read()
return self.FileResponse(content)
def result(self, *args, **kwargs):
return self.wait(*args, **kwargs)
def cancel(self):
pass
def request(http_client, url, headers):
"""Download and parse JSON from a URL.
:param http_client: a :class:`bravado.http_client.HttpClient`
    :param url: url for api docs
    :param headers: dict of request headers to send with the request
    :return: an object with a :func:`wait` method which returns the api docs
"""
if is_file_scheme_uri(url):
return FileEventual(url)
request_params = {
'method': 'GET',
'url': url,
'headers': headers,
}
return http_client.request(request_params)
class Loader(object):
"""Abstraction for loading Swagger API's.
:param http_client: HTTP client interface.
:type http_client: http_client.HttpClient
:param request_headers: dict of request headers
"""
def __init__(self, http_client, request_headers=None):
self.http_client = http_client
self.request_headers = request_headers or {}
def load_spec(self, spec_url, base_url=None):
"""Load a Swagger Spec from the given URL
:param spec_url: URL to swagger.json
:param base_url: TODO: need this?
:returns: json spec in dict form
"""
response = request(
self.http_client,
spec_url,
self.request_headers,
).result()
content_type = response.headers.get('content-type', '').lower()
if is_yaml(spec_url, content_type):
return self.load_yaml(response.text)
else:
return response.json()
def load_yaml(self, text):
"""Load a YAML Swagger spec from the given string, transforming
integer response status codes to strings. This is to keep
compatibility with the existing YAML spec examples in
https://github.com/OAI/OpenAPI-Specification/tree/master/examples/v2.0/yaml
:param text: String from which to parse the YAML.
:type text: basestring
:return: Python dictionary representing the spec.
:raise: yaml.parser.ParserError: If the text is not valid YAML.
"""
data = yaml.safe_load(text)
for path, methods in iter(data.get('paths', {}).items()):
for method, operation in iter(methods.items()):
if 'responses' in operation:
operation['responses'] = dict(
(str(code), response)
for code, response in iter(
operation['responses'].items()
)
)
return data
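# Editor's note: a hedged sketch, not part of the original module, illustrating
# the status-code coercion performed by Loader.load_yaml; the inline spec text
# below is hypothetical.
def _example_load_yaml_status_coercion():
    spec_text = (
        "paths:\n"
        "  /pets:\n"
        "    get:\n"
        "      responses:\n"
        "        200:\n"
        "          description: ok\n"
    )
    spec = Loader(http_client=None).load_yaml(spec_text)
    # The integer key 200 has been converted to the string '200'.
    return list(spec['paths']['/pets']['get']['responses'].keys())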
# TODO: Adding the file scheme here just adds complexity to request()
# Is there a better way to handle this?
def load_file(spec_file, http_client=None):
"""Loads a spec file
:param spec_file: Path to swagger.json.
:param http_client: HTTP client interface.
:return: validated json spec in dict form
:raise: IOError: On error reading swagger.json.
"""
file_path = os.path.abspath(spec_file)
url = urlparse.urljoin(u'file:', urllib.request.pathname2url(file_path))
# When loading from files, everything is relative to the spec file
dir_path = os.path.dirname(file_path)
base_url = urlparse.urljoin(u'file:', urllib.request.pathname2url(dir_path))
return load_url(url, http_client=http_client, base_url=base_url)
def load_url(spec_url, http_client=None, base_url=None):
"""Loads a Swagger spec.
:param spec_url: URL for swagger.json.
:param http_client: HTTP client interface.
:param base_url: Optional URL to be the base URL for finding API
declarations. If not specified, 'basePath' from the
resource listing is used.
:return: validated spec in dict form
:raise: IOError, URLError: On error reading api-docs.
"""
if http_client is None:
http_client = RequestsClient()
loader = Loader(http_client=http_client)
return loader.load_spec(spec_url, base_url=base_url)
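# Editor's note: a hedged usage sketch, not part of the original module; the
# filename below is hypothetical. It mirrors the 'file:' URL construction done
# by load_file(), where relative references resolve against the spec's directory.
def _example_file_url_for_spec():
    file_path = os.path.abspath('swagger.json')
    return urlparse.urljoin(u'file:', urllib.request.pathname2url(file_path))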
|
analogue/bravado
|
bravado/swagger_model.py
|
Python
|
bsd-3-clause
| 5,223 | 0.000191 |
#! /usr/bin/env python3
import os
import zipfile
import sys
import settings
__author__ = 'tigge'
def main():
zipfilename = os.path.join(settings.get("folder"), settings.get("basename") + ".zip")
zip = zipfile.ZipFile(zipfilename, mode="w", )
for filename in os.listdir(settings.get("folder")):
print(filename, os.path.basename(zipfilename),filename == os.path.basename(zipfilename))
if not filename.startswith(".t2w-temp-") and filename != os.path.basename(zipfilename):
zip.write(os.path.join(settings.get("folder"), filename), arcname=filename)
zip.close()
if __name__ == "__main__":
sys.exit(main())
|
Tigge/trello-to-web
|
zip.py
|
Python
|
mit
| 659 | 0.007587 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final steps
  specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
  - 1st TPU execution: iterations_per_loop = 4
  - 2nd TPU execution: iterations_per_loop = 4
  - 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increases 1 by default. So, we increase the difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
  All reserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.
  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function; the function returns a list of Tensors. `host_call`
  currently works for train() and evaluate(). The Tensors returned by the
  function are evaluated on the CPU on every step, so there is communication
  overhead when sending tensors from TPU to CPU. To reduce the overhead, try
  reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=hooks,
evaluation_hooks=hooks,
prediction_hooks=hooks)
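# Editor's note: a minimal hedged sketch, not part of the original module, of a
# model_fn body returning a TPUEstimatorSpec with `eval_metrics`; `logits`,
# `labels`, `loss` and `train_op` are assumed to be built by the caller's model.
def _example_tpu_estimator_spec(mode, loss, train_op, logits, labels):
  import tensorflow as tf  # local import keeps the sketch self-contained

  def metric_fn(labels, logits):
    # Runs on the CPU host after `labels`/`logits` are outfed and concatenated.
    predictions = tf.argmax(logits, axis=-1)
    return {'accuracy': tf.metrics.accuracy(labels, predictions)}

  return TPUEstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metrics=(metric_fn, [labels, logits]))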
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.' % self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
  1. initializes and shuts down the TPU system.
  2. launches and joins the threads for infeed enqueue and (optional) outfeed
     dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._session_cancel_timer = None
self._feed_error = None
self._finished = False
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _log_error(self, session, error):
"""Log an infeed or outfeed error.
This logs a short error message immediately, and schedules a timer to
emit the full stack trace and error message after a short period of time.
If the main session has terminated by the time the timer triggers, we
assume the real source of the error was from the main session and avoid
emitting a stack trace for the infeed.
Args:
      session: `tf.Session`, session to be terminated.
      error: the exception that triggered logging.
"""
logging.warning(
'\n\n'
'Error occurred during infeed/outfeed. This may be due to a compile '
'error in the main session. Waiting for a short time for the main '
'session to come back.\n\n%s', error)
self._feed_error = traceback.format_exc()
# If we've already encountered a feed error, don't schedule another
# cancellation op.
if self._session_cancel_timer:
return
def _cancel_session():
"""Close the session to avoid the main thread from hanging.
If input pipeline triggers any error, the infeed thread dies but the main
thread for TPU computation waits for the infeed enqueue forever. Close the
Session to cancel the main thread Session.run execution.
We sleep for a few seconds before closing to give some time for the TPU
      compilation error, if any, to propagate from TPU to CPU host. Compilation
errors should be reported by the main thread so that the program can be
interrupted and users can take action. Due to a race condition, the
infeed thread might see an error first. Closing the session here
immediately would result in a session cancellation exception in the main
thread, instead of the expected compile error. User code that depends on
having the proper exception type will therefore be confused.
"""
time.sleep(5)
# If the main session is still running, the infeed/outfeed errors are
# legitimate, and should be logged.
if not self._finished and self._feed_error:
logging.error('Feed error: %s', self._feed_error)
logging.error('Closing session. A RuntimeError should follow.')
session.close()
self._session_cancel_timer = threading.Thread(target=_cancel_session)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('%s thread sleeping for %d seconds.', self._name,
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('%s thread starting after sleep', self._name)
try:
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
try:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
def before_run(self, run_context):
self._feed_error = None
# Wait for the cancellation timer to complete before continuing.
if self._session_cancel_timer:
self._session_cancel_timer.join()
self._session_cancel_timer = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
if self._session_cancel_timer:
logging.warning('Feed error occurred; waiting for message.')
self._session_cancel_timer.join()
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
logging.info('Stop output thread controller')
self._outfeed_controller.join()
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  the following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
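  # Editor's note: illustrative arithmetic, not part of the original code. With
  # iterations=4 and last_step=10, _next_iterations yields min(10-0, 4)=4, then
  # min(10-4, 4)=4, then min(10-8, 4)=2, so the loops run 4, 4 and 2 steps.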
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for the prediction model. But it lets
    # TPUInfeedOutfeedSessionHook print a nice message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal
)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
# TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the
# _InternalTPUContext.tpu_ordinal_function. We should either introduce another
# abstraction or a different helper method.
def _tpu_ordinal_function_impl(shard_index_in_host):
# We put both enqueue/dequeue op at tpu.core(0) in each replica.
replica = ctx.device_assignment.lookup_replicas(
host_id, (0, 0, 0))[shard_index_in_host]
return ctx.device_assignment.tpu_ordinal(replica=replica)
if ctx.model_parallelism_enabled:
tpu_ordinal_function = _tpu_ordinal_function_impl
else:
tpu_ordinal_function = None
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
    By providing it as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
# TODO(b/XXX): Add predict support for PER_HOST_V2
      raise TypeError('Mode PREDICT is not yet supported in PER_HOST_V2 mode.')
hooks.append(inputs.dataset_initializer_hook())
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
  In addition, flattening/unflattening is handled by `_InputPipeline`. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
TPU infeed/outfeed library expects flattened tensor list. So, `features` and
`labels` need to be flattened, before infeed enqueue, and the structure of
them needs to be recorded, in order to restore them after infeed dequeue.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self):
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
self._signals_helper = None
# Internal state.
self._initialized = False
def has_labels(self):
return self._has_labels
def validate_and_record_structure(self, features, labels, signals=None):
"""Validates and records the structure of features` and `labels`."""
def _extract_key_names(tensor_or_dict):
if tensor_or_dict is None:
return []
return sorted(tensor_or_dict.keys()) if isinstance(
tensor_or_dict, dict) else []
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if signals is not None and self._signals_helper is None:
# Record signals helper.
self._signals_helper = _SignalsHelper(signals)
if self._initialized:
# Verify the structure is same. The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
# Record structure.
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend(
[features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
if signals is not None:
flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
        dict) to the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
expected_num_features = (
len(self._feature_names) if self._feature_names else 1)
if self._has_labels:
expected_num_labels = (
len(self._label_names) if self._label_names else 1)
else:
expected_num_labels = 0
expected_num_signals = (
self._signals_helper.num_signals if self._signals_helper else 0)
expected_num_tensors = (
expected_num_features + expected_num_labels + expected_num_signals)
if expected_num_tensors != len(flattened_inputs):
raise ValueError(
            'The number of flattened tensors does not match the expected number. '
'Expected {}, got {}'.format(expected_num_tensors,
len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(
zip(self._feature_names, flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
label_list = flattened_inputs[
expected_num_features:expected_num_features + expected_num_labels]
unflattened_label = dict(zip(self._label_names, label_list))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
signals = None
if expected_num_signals != 0:
tensor_list_for_signals = flattened_inputs[
expected_num_features + expected_num_labels:]
signals = self._signals_helper.unflatten(tensor_list_for_signals)
return _Inputs(unflattened_features, unflattened_label, signals=signals)
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
all_hooks.extend(hooks)
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes. So, any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
    Performs some sanity checks and logs user-friendly information. We should
    error out to give users a better error message. But, if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
user code, so, log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/programmers_guide/datasets for '
                 'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent and
  performs the necessary checks and mutations required by TPU training and evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
representing the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return train_step, host_call, captured_scaffold_fn
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
representing the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
predict_fn representing the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
            'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return predict_step, host_calls, captured_scaffold_fn
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(
key, tensor))
return predictions
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
    # Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
estimator_spec = self._model_fn(features=features, **kwargs)
if (self._ctx.is_running_on_cpu(is_export_mode) and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensor is passed to the host_fn and
executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return []
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
tpu_device_placement_fn = self._ctx.tpu_device_placement_function
for i in xrange(self._ctx.num_replicas):
with ops.device(tpu_device_placement_fn(i)):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
pos += len(self._tensors[name])
    # It is assumed evaluation always happens on a single-host TPU system. So,
    # place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
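# The hook above only does simple arithmetic on top of StepCounterHook. A small
# sketch of that arithmetic with made-up numbers, assuming a global batch size
# of 1024 and a trigger interval of 100 steps:
#
#   elapsed_steps = 100
#   elapsed_time = 12.5                                  # seconds
#   global_step_per_sec = elapsed_steps / elapsed_time   # 8.0
#   examples_per_sec = 1024 * global_step_per_sec        # 8192.0
#
# Both values are written as scalar summaries and logged, as done in
# _log_and_record above.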
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
    `input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
  prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
      'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
  It is not designed for latency-critical systems. In addition, due to some
  usability issues, for prediction with small datasets, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
  At serving time, these tags are used to select the metagraph to load.
  Before running the graph on TPU, the TPU system needs to be initialized. If
  the TensorFlow Serving model-server is used, this is done automatically. If
  not, please call `session.run(tpu.initialize_system())`.
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
      export_outputs['classes'] = (
          export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
Current limitations:
--------------------
1. Outside compilation does not work yet (b/79991729).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`. Instead, the user can pass the training hooks as
an argument to `TPUEstimator.train()`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the model_dir
        in `config` will be used if set. If both are set, they must be the
        same. If both are `None`, a temporary directory will be used.
      config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU besides the one on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a `WarmStartSettings`,
then all variables are warm-started, and it is assumed
that vocabularies and Tensor names are unchanged.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.computation_shape):
raise ValueError(
'Model parallelism only supports per host input for training. '
            'Please adjust TPUConfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
    # Overwrite log_step_count_steps to prevent LoggingTensorHook and
    # StepCounterHook from being created in Estimator. TPUEstimator already
    # added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu,
eval_on_tpu)
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None):
if mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
'TPUEstimator only handles mode PREDICT for export_savedmodel(); '
'got {}.'.format(mode))
super(TPUEstimator, self)._add_meta_graph_for_mode(builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables,
mode=mode)
if self._export_to_tpu:
input_receiver_fn_map = {_REWRITE_FOR_INFERENCE_MODE:
input_receiver_fn_map[mode]}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
(super(TPUEstimator, self).
_add_meta_graph_for_mode(builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=False,
mode=mode,
export_tags=export_tags))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(
features, labels, mode, config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
tensors_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs)
)
tensors = nest.flatten(tensors_dict)
tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)]
# We cannot return anything other than `tpu_tensors` here so we capture
# the rest for later use.
capture.capture((estimator_spec, tensors_dict, tensors))
return tpu_tensors
tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)
estimator_spec, tensors_dict, tensors = capture.get()
# Reconstruct `tensors`, but with `tpu_tensors` replaced with
# `tpu_tensors_on_cpu`.
new_tensors = [
tpu_tensors_on_cpu.pop(0) if _is_tpu_tensor(t) else t
for t in tensors
]
# Reconstruct `tensors_dict`.
new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)
# Reconstruct `export_outputs`.
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_tensors_dict)
)
return estimator_spec._replace(export_outputs=new_export_outputs)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
      # Setting the batch size in params first. This helps the user to have
      # the same input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'],
_BATCH_SIZE_KEY, batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
    For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has a stronger check. The parent class's check generates a confusing warning
    message.
Args:
result: `features` returned by input_fn.
"""
pass
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
if mode != model_fn_lib.ModeKeys.PREDICT:
is_export_mode = False
else:
          # For export_savedmodel, input_fn is never passed to Estimator. So,
          # by checking the self._is_input_fn_invoked bit, we can tell that,
          # given mode == PREDICT, this is the .predict API rather than the
          # export_savedmodel API.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
return model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'shutdown_worker')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(timeout_ms=60*1000),
]
elif shutdown_mode == 'shutdown_computation':
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60*1000),
]
else:
raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
shutdown_mode)
shutdown_hooks.append(session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks
))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency)
])
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
output_dir=self.model_dir,
every_n_steps=self._log_every_n_steps)
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(total_loss,
math_ops.cast(
iterations_per_loop_var,
dtype=total_loss.dtype))
# Creates a dummy metric update_op for all metrics. Estimator expects
# all metrics in eval_metric_ops have update_op and calls them one by
          # one. The real metric update_ops are invoked in a separate thread.
          # So, here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret['eval_metrics'].items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
        # directly and yields the elements (via a generator) to the call site.
        # So, the outfeed based prediction must be passed to MonitoredSession
        # directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
        #    to form a single invocation. This avoids the issue where we might
        #    trigger multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions, message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
          host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
host_ops),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _is_tpu_tensor(tensor):
if not isinstance(tensor, ops.Tensor):
return False
try:
tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) # pylint: disable=protected-access
except ValueError:
return True
else:
return False
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
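# For illustration, a hypothetical PredictOutput built from two tensors named
# `probabilities` and `classes` would be flattened by the function above into
# the list of its output values, e.g.:
#
#   output = export_output_lib.PredictOutput(
#       {'probabilities': probabilities, 'classes': classes})
#   _export_output_to_tensors(output)  # -> [probabilities, classes]
#                                      #    (in dict iteration order)
#
# The tensor names here are assumptions made purely for the example.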
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_eval_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_eval_step, [_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_train_step, host_call, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step, [_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_cores = ctx.num_cores
single_tpu_predict_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=num_cores,
outputs_from_all_shards=False)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
  # Check if there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
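# The check above passes when the user's model_fn builds its train_op through
# an optimizer wrapped in CrossShardOptimizer, which sums gradients across
# replicas with CrossReplicaSum ops. A minimal sketch of such a model_fn body
# using the public tf.contrib.tpu API (the loss tensor and learning rate are
# placeholders for the example):
#
#   optimizer = tf.contrib.tpu.CrossShardOptimizer(
#       tf.train.GradientDescentOptimizer(learning_rate=0.01))
#   train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())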
class _CapturedObject(object):
"""A placeholder to capture an object.
  This is useful when we need to capture a Python object in the TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
          'InternalError: Object can be captured only once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
          'InternalError: Object is not captured properly before `get`. '
          'Please file a bug.')
return self._object
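# A short sketch of the intended capture/get protocol, using only the class
# above (the function `body` is hypothetical and stands in for a control flow
# body such as the `computation` closures elsewhere in this file):
#
#   captured = _CapturedObject()
#
#   def body():
#     captured.capture('some python object created inside control flow')
#
#   # After the control flow construct has run `body`:
#   obj = captured.get()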
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
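# For reference, a scaffold_fn supplied through TPUEstimatorSpec is expected to
# be a zero-argument callable returning a tf.train.Scaffold. A minimal sketch
# (the Saver settings are arbitrary and only for illustration):
#
#   def scaffold_fn():
#     return tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=5))
#
#   return TPUEstimatorSpec(
#       mode=mode, loss=loss, train_op=train_op, scaffold_fn=scaffold_fn)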
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
                         'dataset should be provided, not both. Please file '
                         'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_util._DatasetInitializerHook(iterator)
# pylint: enable=protected-access
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must call dataset_initializer_hook '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self, dataset, batch_size, add_padding=False):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary now and `features`, `labels`, and `signals` are three
distinguished keys in that dict. This provides a better structure, which
eases the process to decompose the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(
features, labels, batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(
scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
else:
      # For the non-Tensor case, it is used in SessionRunHook. So, we cannot
      # modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
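# Conceptually, for a batch size of 4, `as_dict()` above yields a boolean
# 'stopping' tensor of shape [4, 1] (all False for a regular batch, all True
# for the final signalling batch), and `as_scalar_stopping_signal` simply reads
# element [0][0] of it. A rough sketch with plain Python values:
#
#   signals = {'stopping': [[False], [False], [False], [False]]}
#   scalar = signals['stopping'][0][0]   # False -> keep running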
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor, real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
    # As we split the Tensors to all TPU cores and concat them back, it is
    # important to ensure the real data is placed before the padded ones, i.e.,
    # order is preserved. Given that, the sliced padding mask should be all
    # 0's. If this assertion failed, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features)
if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat(
[
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
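# A numeric sketch of the padding helpers above, with hypothetical sizes: for
# batch_size=8 and a final real batch of 5 examples, missing_count is 3 and the
# padding mask marks the padded tail:
#
#   real_batch_size, batch_size = 5, 8
#   missing_count = batch_size - real_batch_size          # 3
#   padding_mask = [0] * real_batch_size + [1] * missing_count
#   # -> [0, 0, 0, 0, 0, 1, 1, 1]; slice_tensor_or_dict later drops the
#   #    positions where the mask is 1.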
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
    for key in sorted(six.iterkeys(signals)):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
    return [signals[key] for key in sorted(six.iterkeys(signals))]
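# The helper above is a simple flatten/unflatten round trip over sorted keys.
# A sketch with a hypothetical signals dict:
#
#   signals = {'stopping': stop_t, 'padding_mask': mask_t}
#   helper = _SignalsHelper(signals)
#   tensor_list = _SignalsHelper.as_tensor_list(signals)  # [mask_t, stop_t]
#   helper.unflatten(tensor_list)                         # back to the dict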
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
        'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()])))
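# To make the size check above concrete: a hypothetical float32 predictions
# tensor of shape (1024, 1000) contributes
#
#   1024 * 1000 * 4 bytes = 4,096,000 bytes (~3.9 MB)
#
# per key, so the one-gigabyte protobuf limit is only reached with very large
# batches or very wide prediction tensors.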
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
    # For HParams, we need to use the dedicated HParams API.
    if key in params:
      params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
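# A quick sketch of both branches above, with a hypothetical key:
#
#   params = {'learning_rate': 0.01}
#   _add_item_to_params(params, 'batch_size', 128)   # plain dict: item added
#
#   hparams = hparam.HParams(learning_rate=0.01)
#   _add_item_to_params(hparams, 'batch_size', 128)  # HParams: add_hparam used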
| yanchen036/tensorflow | tensorflow/contrib/tpu/python/tpu/tpu_estimator.py | Python | apache-2.0 | 121,921 | 0.006389 |
#!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: dellos9_command
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Run commands on remote devices running Dell OS9
description:
- Sends arbitrary commands to a Dell OS9 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos9_config) to configure Dell OS9 devices.
extends_documentation_fragment: dellos9
options:
commands:
description:
- List of commands to send to the remote dellos9 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
  - This module requires the ssh connection rate limit to be increased.
    Use the command I(ip ssh connection-rate-limit 60) to configure
    this setting. This can also be done via the M(dnos_config) module.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos9_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS9
dellos9_command:
commands: show version
wait_for: result[0] contains OS9
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos9_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos9_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains OS9
- result[1] contains Loopback
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner, FailedConditionsError
from ansible.module_utils.network import NetworkModule, NetworkError
import ansible.module_utils.dellos9
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
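# --- Editor's illustrative sketch (not part of the upstream module): how
# to_lines() above turns the raw stdout list into the stdout_lines value
# documented in RETURN. The device output strings are hypothetical.
def _demo_to_lines():
    sample_stdout = ['Dell Networking OS9\nVersion: 9.10(0.1P13)',
                     'GigabitEthernet 0/1 is up']
    # Each multi-line response is split on '\n', one list per command:
    # [['Dell Networking OS9', 'Version: 9.10(0.1P13)'],
    #  ['GigabitEthernet 0/1 is up']]
    return list(to_lines(sample_stdout))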
def main():
spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = NetworkModule(argument_spec=spec,
connect_on_load=False,
supports_check_mode=True)
commands = module.params['commands']
conditionals = module.params['wait_for'] or list()
warnings = list()
runner = CommandRunner(module)
for cmd in commands:
if module.check_mode and not cmd.startswith('show'):
warnings.append('only show commands are supported when using '
'check mode, not executing `%s`' % cmd)
else:
if cmd.startswith('conf'):
module.fail_json(msg='dellos9_command does not support running '
'config mode commands. Please use '
'dellos9_config instead')
runner.add_command(cmd)
for item in conditionals:
runner.add_conditional(item)
runner.retries = module.params['retries']
runner.interval = module.params['interval']
try:
runner.run()
except FailedConditionsError:
exc = get_exception()
module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc))
result = dict(changed=False)
result['stdout'] = list()
for cmd in commands:
try:
output = runner.get_command(cmd)
except ValueError:
output = 'command not executed due to check_mode, see warnings'
result['stdout'].append(output)
result['warnings'] = warnings
result['stdout_lines'] = list(to_lines(result['stdout']))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
sivel/ansible-modules-core
|
network/dellos9/dellos9_command.py
|
Python
|
gpl-3.0
| 6,997 | 0.001143 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-07 15:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('thresher', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='project',
name='pybossa_url',
field=models.CharField(blank=True, default=b'', max_length=200),
),
]
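# --- Editor's note (illustrative, not part of the migration): after this
# migration the corresponding model field is expected to look roughly like
#     pybossa_url = models.CharField(blank=True, default='', max_length=200)
# on the thresher Project model; the exact model source is an assumption here.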
|
Goodly/TextThresher
|
thresher/migrations/0002_auto_20170607_1544.py
|
Python
|
apache-2.0
| 473 | 0 |
from bs4 import BeautifulSoup
from urllib.parse import quote
from core import models
def get_setting(setting_name, setting_group_name, default=None):
try:
setting = models.Setting.objects.get(
name=setting_name,
group__name=setting_group_name,
)
return setting.value
except models.Setting.DoesNotExist:
if default:
return default
return ''
def strip_html_tags(raw_html):
return BeautifulSoup(raw_html, "html.parser").get_text()
def add_content_disposition_header(
response,
filename,
disposition='attachment'
):
"""
Add an RFC5987 / RFC6266 compliant Content-Disposition header to an
HttpResponse to tell the browser to save the HTTP response to a file.
Args:
response (django.http.response.HttpResponseBase): the response object.
filename (str): the name that the file should be served under.
disposition (str): the disposition: 'inline' or 'attachment' (default)
"""
try:
filename.encode('ascii')
file_expr = 'filename="{}"'.format(filename)
except UnicodeEncodeError:
file_expr = "filename*=utf-8''{}".format(quote(filename))
response['Content-Disposition'] = f'{disposition}; {file_expr}'
return response
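# --- Editor's illustrative sketch (not part of the upstream module): typical
# use from a Django view; HttpResponse and the filenames are assumptions made
# only for this example.
def _demo_content_disposition():
    from django.http import HttpResponse
    response = HttpResponse(b'%PDF-', content_type='application/pdf')
    # ASCII name    -> Content-Disposition: attachment; filename="paper.pdf"
    # non-ASCII name-> Content-Disposition: attachment; filename*=utf-8''r%C3%A9sum%C3%A9.pdf
    return add_content_disposition_header(response, 'résumé.pdf')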
|
ubiquitypress/rua
|
src/core/util.py
|
Python
|
gpl-2.0
| 1,313 | 0 |
suite = {
"name" : "mx",
"libraries" : {
# ------------- Libraries -------------
"JACOCOAGENT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoagent-0.7.1-1.jar"],
"sha1" : "2f73a645b02e39290e577ce555f00b02004650b0",
},
"JACOCOREPORT" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/jacoco/jacocoreport-0.7.1-2.jar"],
"sha1" : "a630436391832d697a12c8f7daef8655d7a1efd2",
},
"FINDBUGS_DIST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/findbugs-3.0.0.zip",
"http://sourceforge.net/projects/findbugs/files/findbugs/3.0.0/findbugs-3.0.0.zip/download",
],
"sha1" : "6e56d67f238dbcd60acb88a81655749aa6419c5b",
},
"SIGTEST" : {
"urls" : [
"http://hg.netbeans.org/binaries/A7674A6D78B7FEA58AF76B357DAE6EA5E3FDFBE9-apitest.jar",
],
"sha1" : "a7674a6d78b7fea58af76b357dae6ea5e3fdfbe9",
},
"CODESNIPPET-DOCLET" : {
"urls" : [
"http://repo1.maven.org/maven2/org/apidesign/javadoc/codesnippet-doclet/0.5/codesnippet-doclet-0.5.jar",
],
"sha1" : "e9f37916a0ee0f2f6dc0c1d4ae0ce6e7c7a6e874",
},
"JUNIT" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11.jar",
],
"sha1" : "4e031bb61df09069aeb2bffb4019e7a5034a4ee0",
"eclipse.container" : "org.eclipse.jdt.junit.JUNIT_CONTAINER/4",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/junit-4.11-sources.jar",
"https://search.maven.org/remotecontent?filepath=junit/junit/4.11/junit-4.11-sources.jar",
],
"sourceSha1" : "28e0ad201304e4a4abf999ca0570b7cffc352c3c",
"dependencies" : ["HAMCREST"],
"licence" : "CPL",
"maven" : {
"groupId" : "junit",
"artifactId" : "junit",
"version" : "4.11",
}
},
"CHECKSTYLE" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/checkstyle-6.0-all.jar",
"jar:http://sourceforge.net/projects/checkstyle/files/checkstyle/6.0/checkstyle-6.0-bin.zip/download!/checkstyle-6.0/checkstyle-6.0-all.jar",
],
"sha1" : "2bedc7feded58b5fd65595323bfaf7b9bb6a3c7a",
"licence" : "LGPLv21",
"maven" : {
"groupId" : "com.puppycrawl.tools",
"artifactId" : "checkstyle",
"version" : "6.0",
}
},
"HAMCREST" : {
"urls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
"sha1" : "42a25dc3219429f0e5d060061f71acb49bf010a0",
"sourceUrls" : [
"https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/hamcrest-core-1.3-sources.jar",
"https://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3-sources.jar",
],
"sourceSha1" : "1dc37250fbc78e23a65a67fbbaf71d2e9cbc3c0b",
"licence" : "BSD-new",
"maven" : {
"groupId" : "org.hamcrest",
"artifactId" : "hamcrest-core",
"version" : "1.3",
}
},
},
"licenses" : {
"GPLv2-CPE" : {
"name" : "GNU General Public License, version 2, with the Classpath Exception",
"url" : "http://openjdk.java.net/legal/gplv2+ce.html"
},
"BSD-new" : {
"name" : "New BSD License (3-clause BSD license)",
"url" : "http://opensource.org/licenses/BSD-3-Clause"
},
"CPL" : {
"name" : "Common Public License Version 1.0",
"url" : "http://opensource.org/licenses/cpl1.0.txt"
},
"LGPLv21" : {
"name" : "GNU Lesser General Public License, version 2.1",
"url" : "http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html"
},
},
"projects" : {
"com.oracle.mxtool.junit" : {
"subDir" : "java",
"sourceDirs" : ["src"],
"dependencies" : [
"JUNIT",
],
"javaCompliance" : "1.8",
},
},
}
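# --- Editor's illustrative sketch (not part of the suite definition): checking a
# downloaded jar against the "sha1" recorded for a library entry above. This is a
# plain hashlib example, not mx's own download/verification logic.
def _sha1_matches(path, expected_sha1):
    import hashlib
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha1
# e.g. _sha1_matches('junit-4.11.jar', suite["libraries"]["JUNIT"]["sha1"])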
|
smarr/mxtool
|
mx.mx/suite.py
|
Python
|
gpl-2.0
| 4,144 | 0.021477 |
import numpy as np
import pylab as pl
from scipy.integrate import odeint
from scipy.interpolate import interp1d
phi = [0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 28.0, 32.0, 36.0, 40.0, 44.0, 48.0, 52.0, 56.0, 60.0, 64.0, 68.0, 72.0, 76.0, 80.0, 84.0, 88.0, 92.0, 96.0, 100.0, 104.0, 108.0, 112.0, 116.0, 120.0, 124.0, 128.0, 132.0, 136.0, 140.0, 144.0, 148.0, 152.0, 156.0, 160.0, 164.0, 168.0, 172.0, 176.0, 180.0]
T_magnetic_torque = [-4.2099750000000006e-07, -0.0006306440000000001, -0.0012431405000000002, -0.0027673415, -0.0037627800000000007, -0.004668125, -0.005372255, -0.005509490000000001, -0.006019965000000001, -0.006202910000000001, -0.007951475000000001, -0.009308495000000002, -0.00903189, -0.009031260000000001, -0.011202345, -0.01173942, -0.013634670000000002, -0.013729415, -0.013753075000000002, -0.014419475000000001, -0.0097538, -0.008428175000000001, -0.0028582085000000003, 0.001922431, 0.00836486, 0.010786545, 0.013908825000000001, 0.013557495000000001, 0.013209665, 0.013566455000000002, 0.011872665000000001, 0.011166470000000001, 0.009009595, 0.009028250000000002, 0.009307900000000001, 0.007950670000000002, 0.006194965, 0.0060320750000000005, 0.00558495, 0.0053764550000000005, 0.0046711700000000005, 0.003763025, 0.0026294870000000007, 0.001254253, 0.000597345, -4.944730000000001e-07]
T_torque = [7.600635000000001e-08, -0.00017802715, -0.00043366050000000005, -0.0013786395, -0.002051854, -0.0025863285000000003, -0.0029615285000000003, -0.0029484280000000003, -0.008016085000000001, -0.008393595, -0.01086897, -0.012900475000000002, -0.012870795, -0.01335537, -0.016747500000000002, -0.018461975000000002, -0.022139145000000002, -0.024000515000000004, -0.025957925, -0.030677990000000002, -0.029933050000000006, -0.037302300000000004, -0.03650815, -0.0453334, -0.02398515, -0.012230330000000003, -0.005922595000000001, -0.0013065115, 0.0007364700000000001, 0.0028762475000000003, 0.0035826000000000005, 0.0041284600000000005, 0.0029878625, 0.0038398150000000003, 0.004532675000000001, 0.0039266150000000005, 0.00301847, 0.0031519530000000003, 0.0030171505000000003, 0.0029608460000000005, 0.0025858875000000004, 0.002052134, 0.001297366, 0.0004423615, 0.00016526405, 1.6689750000000002e-08]
# static characteristic
pl.close()
pl.figure(figsize = [8, 5])
pl.plot(phi, T_magnetic_torque, 'b', label="$\mathrm{0.0~A}$")
pl.plot(phi, T_torque, 'r', label="$\mathrm{0.8~A}$")
pl.plot([0, 180], [0, 0], '--k')
pl.xlabel("$\\phi~\mathrm{(deg.)}$")
pl.ylabel("$T~\mathrm{(Nm)}$")
pl.legend(loc="lower right")
fn_chart_static = pythonlab.tempname("png")
pl.savefig(fn_chart_static, dpi=60)
pl.close()
# show in console
pythonlab.image(fn_chart_static)
J = 7.5e-5;
k = 2e-4
T_f = interp1d(phi, T_torque, kind = "linear")
def func(x, t):
dx = [0., 0.]
dx[0] = x[1]
dx[1] = (- T_f((x[0]*180/np.pi) % 180) - k*x[1]) * 1/J
return dx
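# Editor's note: the state vector is x = [phi, omega]; func() integrates the
# rigid-body model J*domega/dt = -T(phi) - k*omega with dphi/dt = omega, where
# the load torque T is interpolated from the measured characteristic T_torque.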
x0 = [np.pi/6, 0]
time = np.linspace(0, 2, 1000)
y = odeint(func, x0, time)
# dynamic characteristic
pl.close()
pl.figure(figsize = [8, 5])
pl.subplot(2,1,1)
pl.plot(time, y[:,0]/np.pi*180 % 180, 'r-')
# pl.xlabel("$t~\mathrm{(s)}$")
pl.ylabel("$\\phi~\mathrm{(deg.)}$")
pl.xlim(0, 1.41)
pl.subplot(2,1,2)
pl.plot(time, y[:,1]/2/np.pi*60, 'r-')
pl.xlabel("$t~\mathrm{(s)}$")
pl.ylabel("$n~\mathrm{(rev./min.)}$")
pl.xlim(0, 1.41)
fn_chart_dynamic = pythonlab.tempname("png")
pl.savefig(fn_chart_dynamic, dpi=60)
pl.close()
# show in console
pythonlab.image(fn_chart_dynamic)
|
karban/agros2d
|
data/scripts/dc_motor_dynamic.py
|
Python
|
gpl-2.0
| 3,460 | 0.010116 |
# The docs say the processing time is less than 20 milliseconds
#PROCESSING_TIME = 0.015
PROCESSING_TIME = 0.010
INTERVAL_SCALE = 0.95
# Number of degrees for a small angle... if the angle is smaller than this then
# the rover won't try to turn, to help keep the path straight
SMALL_ANGLE = 7.0
# Ensure that the rover isn't in a hard turn for this kind of angle
SOFT_ANGLE = 15.0
FORCE_TURN_DIST = 40.0
FORCE_TURN_SQ = FORCE_TURN_DIST ** 2
BLOAT = 1.3 # make things 30 percent bigger
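# --- Editor's illustrative sketch (hypothetical, not from this repository): one
# way these angle thresholds are typically consumed by a steering routine; the
# function name, argument and return labels are assumptions for the example.
def _steering_hint(angle_to_target_deg):
    a = abs(angle_to_target_deg)
    if a < SMALL_ANGLE:
        return 'straight'    # angle too small to bother turning
    if a < SOFT_ANGLE:
        return 'soft-turn'   # avoid a hard turn for moderate angles
    return 'hard-turn'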
|
eklitzke/icfp08
|
src/constants.py
|
Python
|
isc
| 491 | 0.004073 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/food/shared_drink_charde.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/food/shared_drink_charde.py
|
Python
|
mit
| 446 | 0.047085 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import inspect
class PerlIoSocketSsl(PerlPackage):
"""SSL sockets with IO::Socket interface"""
homepage = "http://search.cpan.org/~sullr/IO-Socket-SSL-2.052/lib/IO/Socket/SSL.pod"
url = "http://search.cpan.org/CPAN/authors/id/S/SU/SULLR/IO-Socket-SSL-2.052.tar.gz"
version('2.052', sha256='e4897a9b17cb18a3c44aa683980d52cef534cdfcb8063d6877c879bfa2f26673')
depends_on('perl-net-ssleay', type=('build', 'run'))
def configure(self, spec, prefix):
self.build_method = 'Makefile.PL'
self.build_executable = inspect.getmodule(self).make
# Should I do external tests?
config_answers = ['n\n']
config_answers_filename = 'spack-config.in'
with open(config_answers_filename, 'w') as f:
f.writelines(config_answers)
with open(config_answers_filename, 'r') as f:
inspect.getmodule(self).perl('Makefile.PL', 'INSTALL_BASE={0}'.
format(prefix), input=f)
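# --- Editor's note (illustrative): with this package file available to a Spack
# instance, the build is typically invoked as `spack install perl-io-socket-ssl`;
# the interactive "external tests?" prompt from Makefile.PL is answered with the
# 'n' written to spack-config.in in configure() above.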
|
iulian787/spack
|
var/spack/repos/builtin/packages/perl-io-socket-ssl/package.py
|
Python
|
lgpl-2.1
| 1,217 | 0.003287 |
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_read_library_to_file'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_read_library_to_file.kb_read_library_to_fileImpl import kb_read_library_to_file
impl_kb_read_library_to_file = kb_read_library_to_file(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_read_library_to_file'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_read_library_to_file.convert_read_library_to_file,
name='kb_read_library_to_file.convert_read_library_to_file',
types=[dict])
self.method_authentication['kb_read_library_to_file.convert_read_library_to_file'] = 'required'
self.rpc_service.add(impl_kb_read_library_to_file.status,
name='kb_read_library_to_file.status',
types=[dict])
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = "Authentication required for " + \
"kb_read_library_to_file but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
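# --- Editor's illustrative sketch (not part of the generated server code):
# running the service in a background process for a quick local test, as
# described in the start_server docstring; the port number is an assumption.
def _demo_run_in_background():
    port = start_server(host='localhost', port=9999, newprocess=True)
    # ... POST JSON-RPC requests to http://localhost:<port>/ here ...
    stop_server()
    return port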
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
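# --- Editor's note (illustrative): a minimal input file for the async CLI mode
# above. Only the method name and the JSON-RPC 1.1 envelope come from this file;
# the parameter contents are hypothetical.
#
# {
#   "version": "1.1",
#   "id": "12345",
#   "method": "kb_read_library_to_file.convert_read_library_to_file",
#   "params": [{"workspace_name": "my_workspace", "read_libraries": ["my_reads"]}]
# }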
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
MrCreosote/kb_read_library_to_file
|
lib/kb_read_library_to_file/kb_read_library_to_fileServer.py
|
Python
|
mit
| 23,263 | 0.00129 |
from platform import python_version
from django import get_version
from distutils.version import LooseVersion
DJANGO_VERSION = get_version()
PYTHON_VERSION = python_version()
# These mean "less than or equal to DJANGO_FOO_BAR"
DJANGO_2_2 = LooseVersion(DJANGO_VERSION) < LooseVersion('3.0')
DJANGO_3_0 = LooseVersion(DJANGO_VERSION) < LooseVersion('3.1')
DJANGO_3_1 = LooseVersion(DJANGO_VERSION) < LooseVersion('3.2')
DJANGO_3_2 = LooseVersion('3.2') <= LooseVersion(DJANGO_VERSION) and LooseVersion(DJANGO_VERSION) < LooseVersion('3.3')
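# --- Editor's note (illustrative, hypothetical usage): these flags gate
# version-specific code paths elsewhere in the code base, e.g.
#
# from cms.utils.compat import DJANGO_3_1
#
# if DJANGO_3_1:
#     ...  # behaviour for Django < 3.2
# else:
#     ...  # behaviour for Django >= 3.2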
|
divio/django-cms
|
cms/utils/compat/__init__.py
|
Python
|
bsd-3-clause
| 545 | 0.00367 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import ErodeImage
def test_ErodeImage_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
kernel_file=dict(argstr='%s',
position=5,
xor=['kernel_size'],
),
kernel_shape=dict(argstr='-kernel %s',
position=4,
),
kernel_size=dict(argstr='%.4f',
position=5,
xor=['kernel_file'],
),
minimum_filter=dict(argstr='%s',
position=6,
usedefault=True,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = ErodeImage.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ErodeImage_outputs():
output_map = dict(out_file=dict(),
)
outputs = ErodeImage.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
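# --- Editor's note (illustrative): the yield-style checks above are equivalent
# to asserting individual trait metadata directly, e.g.
#
# inputs = ErodeImage.input_spec()
# assert inputs.traits()['in_file'].argstr == '%s'
# assert inputs.traits()['kernel_size'].xor == ['kernel_file']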
|
blakedewey/nipype
|
nipype/interfaces/fsl/tests/test_auto_ErodeImage.py
|
Python
|
bsd-3-clause
| 1,597 | 0.028178 |