| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
import ply.lex
#
# Lexer setup
#
instructions = (
'NOP', 'INT', 'IPI', 'RETINT', 'CALL', 'RET', 'CLI', 'STI', 'HLT', 'RST', 'IDLE',
'PUSH', 'POP', 'INC', 'DEC', 'ADD', 'SUB', 'CMP', 'J', 'AND', 'OR', 'XOR', 'NOT',
'SHL', 'SHR', 'SHRS', 'LW', 'LS', 'LB', 'LI', 'LIU', 'LA', 'STW', 'STS', 'STB',
'MOV', 'SWP', 'MUL', 'UDIV', 'MOD', 'CMPU', 'CAS', 'SIS', 'DIV',
'BE', 'BNE', 'BS', 'BNS', 'BZ', 'BNZ', 'BO', 'BNO', "BL", "BLE", "BGE", "BG",
'SETE', 'SETNE', 'SETZ', 'SETNZ', 'SETO', 'SETNO', 'SETS', 'SETNS', "SETL", "SETLE", "SETGE", "SETG",
'SELE', 'SELNE', 'SELZ', 'SELNZ', 'SELS', 'SELNS', 'SELO', 'SELNO', "SELL", "SELLE", "SELGE", "SELG",
'LPM', 'CTR', 'CTW', 'FPTC'
)
math_instructions = (
'PUSHW', 'SAVEW', 'POPW', 'LOADW', 'POPUW', 'LOADUW', 'SAVE', 'LOAD',
'INCL', 'DECL', 'ADDL', 'MULL', 'DIVL', 'MODL', 'UDIVL', 'UMODL',
'DUP', 'DUP2', 'SWPL', 'DROP', 'SYMDIVL', 'SYMMODL',
'PUSHL', 'POPL'
)
directives = (
'data', 'text',
'type', 'global',
'ascii', 'byte', 'short', 'space', 'string', 'word',
'section',
'align', 'file',
'set'
)
# Construct list of tokens, and map of reserved words
tokens = instructions + math_instructions + (
'COMMA', 'COLON', 'HASH', 'LBRAC', 'RBRAC', 'DOT', 'PLUS',
'SCONST', 'ICONST',
'ID', 'REGISTER'
)
reserved_map = {
# Special registers
'sp': 'REGISTER',
'fp': 'REGISTER',
# Special instructions
'shiftl': 'SHL',
'shiftr': 'SHR',
'shiftrs': 'SHRS'
}
reserved_map.update({i.lower(): i for i in instructions})
reserved_map.update({i.lower(): i for i in math_instructions})
tokens = tokens + tuple([i.upper() for i in directives])
reserved_map.update({'.' + i: i.upper() for i in directives})
reserved_map.update({i: i.upper() for i in directives})
reserved_map.update({'r%d' % i: 'REGISTER' for i in range(0, 32)})
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count('\n')
# Tokens
t_COMMA = r','
t_COLON = r':'
t_HASH = r'\#'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_DOT = r'\.'
t_PLUS = r'\+'
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
t_ICONST = r'-?(?:(?:0x[0-9a-fA-F][0-9a-fA-F]*)|(?:[0-9][0-9]*))'
def t_ID(t):
r'[a-zA-Z_\.][a-zA-Z0-9_\.]*'
t.type = reserved_map.get(t.value, 'ID')
return t
t_ignore = " \t"
def t_error(t):
from ..errors import AssemblyIllegalCharError
loc = t.lexer.location.copy()
loc.lineno = t.lineno - loc.lineno
loc.column = t.lexer.parser.lexpos_to_lineno(t.lexpos)
raise AssemblyIllegalCharError(c = t.value[0], location = loc, line = t.lexer.parser.lineno_to_line(t.lineno))
class AssemblyLexer(object):
def __init__(self):
self._lexer = ply.lex.lex()
def token(self, *args, **kwargs):
return self._lexer.token(*args, **kwargs)
def input(self, *args, **kwargs):
return self._lexer.input(*args, **kwargs)
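# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal way to drive the lexer above, assuming ply is installed and that
# the mnemonics below are valid for this assembler dialect.
if __name__ == '__main__':
    lexer = AssemblyLexer()
    lexer.input("li r1, 0x10\nadd r1, r2\n")
    tok = lexer.token()
    while tok is not None:
        print("%s %s" % (tok.type, tok.value))
        tok = lexer.token()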
| happz/ducky | ducky/asm/lexer.py | Python | mit | 2,836 | 0.012694 |
# requests-oauth 0.4.0
# Hacked to support RSA-SHA1 signing for Atlassian OAuth.
# Original author: Miguel Araujo
# Forked from https://github.com/maraujop/requests_oauth
# Original license: 3-clause BSD
from hook import OAuthHook
| blackspiraldev/jira-python | jira/packages/requests_oauth/__init__.py | Python | bsd-2-clause | 235 | 0 |
# WifiZoo
# complaints to Hernan Ochoa (hernan@gmail.com)
import curses.ascii
from scapy import *
import datetime
import WifiZooEntities
import os
class WifiGlobals:
def __init__(self):
self.APdict = {}
self.AccessPointsList = []
self.ProbeRequestsListBySSID = []
self.ProbeRequestsListBySRC = []
self._hasPrismHeaders = 1
self._logdir = "./logs/"
self._Cookie = None
self._CookiesList = []
self._pktCounters = {}
self._OUIList = ''
def incrementCounter(self, proto):
if self._pktCounters.has_key(proto):
n = self._pktCounters[proto]
self._pktCounters[proto] = n+1
else:
self._pktCounters[proto] = 1
def getCounter(self, proto):
return self._pktCounters[proto]
def getAllCounters(self):
return self._pktCounters
def getMACVendor(self, aMAC):
if len(aMAC) < 8:
return 'Unknown'
if self._OUIList == '':
f = open("oui_list.txt", "rb")
self._OUIList = f.read()
f.close()
lines = self._OUIList.split('\n')
myMAC = aMAC[0:8]
myMAC = myMAC.lower()
for line in lines:
if len(line) > 8:
vendorname = line.split('\t')[2]
vendormac = line[0:8].replace('-',':').lower()
if vendormac == myMAC:
return vendorname
return 'Unknown'
def addCookie(self, aCookie):
self._CookiesList.append(aCookie)
return
def getCookiesList(self):
return self._CookiesList
def setCookie(self, aCookie):
self._Cookie = aCookie
def getCookie(self):
return self._Cookie
def setHasPrismHeaders(self, aBoolean):
self._hasPrismHeaders = aBoolean
def hasPrismHeaders(self):
return self._hasPrismHeaders
def getClients(self):
return self.APdict
def logDir(self):
if not os.path.isdir( self._logdir ):
os.mkdir( self._logdir )
return "./logs/"
def getProbeRequestsBySSID(self):
return self.ProbeRequestsListBySSID
def addProbeRequest(self, aProbeRequest):
# add ProbeRequest by SSID
found = 0
if len(aProbeRequest.getSSID()) > 0:
for pr in self.ProbeRequestsListBySSID:
if pr.getSSID() == aProbeRequest.getSSID():
# TODO: change the LASTSEEN thing
found = 1
# if SSID was not seen before, add it to the list
if len(aProbeRequest.getSSID()) > 0 and found == 0:
self.ProbeRequestsListBySSID.append( aProbeRequest )
#add ProbeRequest by SRC
if len(aProbeRequest.getSSID()) == 0:
for pr in self.ProbeRequestsListBySRC:
if aProbeRequest.getSRC() == pr.getSRC() and pr.getSSID() == "<Empty>":
return
aProbeRequest.setSSID( "<Empty>" )
self.ProbeRequestsListBySRC.append( aProbeRequest )
return
else:
for pr in self.ProbeRequestsListBySRC:
if pr.getSRC() == aProbeRequest.getSRC() and pr.getSSID() == aProbeRequest.getSSID():
return
# add proberequests with different src or ssid
self.ProbeRequestsListBySRC.append( aProbeRequest )
def dumpProbeRequests(self):
if len(self.ProbeRequestsListBySSID) >= 1:
prf = open( self.logDir() + "probereqsuniqssid.log", "wb" )
for pr in self.ProbeRequestsListBySSID:
prf.write("ssid=" + pr.getSSID() + " dst=" + pr.getDST() + " src=" + pr.getSRC() + " bssid=" + pr.getBSSID() + " (ch: " + str(pr.getChannel()) + ")" + "\n")
prf.close()
if len(self.ProbeRequestsListBySRC) >= 1:
prf = open( self.logDir() + "probereqbysrc.log", "wb" )
setup = """
digraph ProbeReqGraph {
compound=true;
ranksep=1.25;
rankdir="LR";
label="Probe Requests by SRC and SSID";
node [shape=ellipse, fontsize=12];
bgcolor=white;
edge[arrowsize=1, color=black];
"""
prf.write(setup + "\n\n")
for pr in self.ProbeRequestsListBySRC:
prf.write( "\"" + pr.getSSID() + "\"" + " -> " + "\"" + pr.getSRC() + "\"" + "\r\n" )
prf.write("}\n\n")
prf.close()
def getAPList(self):
return self.AccessPointsList
def getAPbyBSSID(self, aBSSID):
for ap in self.AccessPointsList:
if ap.getBSSID() == aBSSID:
return ap
return None
def addAccessPoint(self, bssid, ssid, channel, isprotected):
apFound = 0
for ap in self.AccessPointsList:
if ap.getBSSID() == bssid:
apFound = 1
# could modify this to 'update' SSID of bssid, but mmm
if apFound == 1:
return 0
anAP = WifiZooEntities.AccessPoint()
anAP.setBSSID( bssid )
anAP.setSSID( ssid )
anAP.setChannel( channel )
anAP.setProtected( isprotected )
# I assume it was found NOW, right before this function was called
anAP.setFoundWhen( datetime.datetime.now() )
self.AccessPointsList.append(anAP)
return 1
def dumpAccessPointsList(self, outfile='ssids.log'):
if len(self.AccessPointsList) < 1:
return
sf = open( self.logDir() + outfile , "wb" )
# first dump OPEN networks
for ap in self.AccessPointsList:
if not ap.isProtected():
sf.write( str(ap.getBSSID()) + " -> " + str(ap.getSSID()) + " (ch:" + str(ap.getChannel()) + ")" + " (Encryption:Open)" + " (when: " + str(ap.getFoundWhenString()) + ")" + "\n" )
# now protected networks
for ap in self.AccessPointsList:
if ap.isProtected():
sf.write( str(ap.getBSSID()) + " -> " + str(ap.getSSID()) + " (ch:" + str(ap.getChannel()) + ")" + " (Encryption:YES)" + " (when: " + str(ap.getFoundWhenString()) + ")" + "\n" )
sf.close()
return
def addClients(self, src, dst, bssid):
bssidfound = 0
dump = 0
for x in self.APdict.keys():
if x == bssid:
bssidfound = 1
clientList = self.APdict[ x ]
srcFound = 0
dstFound = 0
for client in clientList:
if client == src:
srcFound = 1
if client == dst:
dstFound = 1
if srcFound == 0:
if src != "ff:ff:ff:ff:ff:ff" and src != bssid:
dump = 1
clientList.append(src)
if dstFound == 0:
if dst != "ff:ff:ff:ff:ff:ff" and dst != bssid:
dump = 1
clientList.append(dst)
self.APdict[ x ] = clientList
if bssidfound == 0:
alist = []
if src != 'ff:ff:ff:ff:ff:ff' and src != bssid:
dump = 1
alist.append( src )
if dst != 'ff:ff:ff:ff:ff:ff' and src != dst and dst != bssid:
dump = 1
alist.append( dst )
self.APdict[ bssid ] = alist
# add this 'nameless' bssid also to the list of access points
#self.addAccessPoint(bssid, '<addedbyClient>', 0, 0)
if dump == 1:
fdump = open(self.logDir()+"clients.log", "wb")
#fdump.write("--DUMP-----" + "-"*30 + "\n")
fdump.write("digraph APgraph {\n\n")
setup = """
compound=true;
ranksep=1.25;
rankdir="LR";
label="802.11 bssids->clients";
node [shape=ellipse, fontsize=12];
bgcolor=white;
edge[arrowsize=1, color=black];
"""
fdump.write(setup + "\n\n")
for apmac in self.APdict.keys():
clientList = self.APdict[ apmac ]
for client in clientList:
#fdump.write("\"" + apmac + "\" -> \"" + client + "\"\n")
ssid = self.getSSID(apmac)
fdump.write("\"" + apmac + " (" + ssid + ")\" -> \"" + client + "\"\n")
fdump.write("\n }\n")
#fdump.write("-----------" + "-"*30 + "\n")
fdump.close()
def getSSID(self, bssid):
aSsid = 'Unknown'
for ap in self.AccessPointsList:
if ap.getBSSID() == bssid:
aSsid = ap.getSSID()
return aSsid
# my weird version
def isAlpha(self, c):
if c != '\x0A' and c != '\x0D':
if curses.ascii.isctrl(c):
return 0
return 1
def getSrcDstBssid(self, pkt):
bssid = ''
src = ''
dst = ''
#0 = mgmt, 1=control, 2=data
p = pkt.getlayer(Dot11)
# is it a DATA packet?
t = p.type
if t == 2:
# if packet FROMDS then dst,bssid,src
# if packet TODS then bssid,src,dst
# toDS
if p.FCfield & 1:
#print "toDS"
bssid = str(p.addr1)
src = str(p.addr2)
dst = str(p.addr3)
# fromDS
elif p.FCfield & 2:
#print "fromDS"
dst = str(p.addr1)
bssid = str(p.addr2)
src = str(p.addr3)
# if bits are 0 & 0, then ad-hoc network
# if bits are 1 & 1, then WDS system
# TODO
return (src,dst,bssid)
Info = WifiGlobals()
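# --- Illustrative usage sketch (added; not part of the original module) ---
# How the shared Info object is typically fed by the sniffer callbacks; the
# MAC addresses and SSID below are made-up example values.
if __name__ == '__main__':
    Info.addAccessPoint('00:11:22:33:44:55', 'ExampleNet', 6, 0)
    Info.addClients('aa:bb:cc:dd:ee:01', '00:11:22:33:44:55', '00:11:22:33:44:55')
    Info.dumpAccessPointsList()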
| firebitsbr/pwn_plug_sources | src/wifizoo/wifiglobals.py | Python | gpl-3.0 | 8,025 | 0.04486 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Exit with an error code if Valgrind finds "definitely lost" memory.
import os
import subprocess
import sys
import tempfile
VALGRIND = "valgrind"
def valleak(executable):
"""Returns (error, stdout, stderr).
error == 0 if successful, an integer > 0 if there are memory leaks or errors."""
valgrind_output = tempfile.NamedTemporaryFile()
valgrind_command = (
VALGRIND,
"--leak-check=full",
"--log-file-exactly=" + valgrind_output.name,
"--error-exitcode=1",
executable)
process = subprocess.Popen(
valgrind_command,
bufsize = -1,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
close_fds = True)
process.stdin.close()
stdout = process.stdout.read()
stderr = process.stderr.read()
error = process.wait()
valgrind_error_file = open(valgrind_output.name)
valgrind_error = valgrind_error_file.read()
valgrind_error_file.close()
valgrind_output.close()
# Find the last summary block in the valgrind report
# This ignores forks
summary_start = valgrind_error.rindex("== ERROR SUMMARY:")
summary = valgrind_error[summary_start:]
append_valgrind = False
if error == 0:
assert "== ERROR SUMMARY: 0 errors" in summary
# Check for memory leaks
if "== definitely lost:" in summary:
error = 1
append_valgrind = True
elif "== ERROR SUMMARY: 0 errors" not in summary:
# We also have valgrind errors: append the log to stderr
append_valgrind = True
if append_valgrind:
stderr = stderr + "\n\n" + valgrind_error
return (error, stdout, stderr)
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.stderr.write("valleak.py [executable]\n")
sys.exit(1)
exe = sys.argv[1]
error, stdout, stderr = valleak(exe)
sys.stdout.write(stdout)
sys.stderr.write(stderr)
sys.exit(error)
| deerwalk/voltdb | tests/scripts/valleak.py | Python | agpl-3.0 | 3,161 | 0.003796 |
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import logging
import Queue
from threading import Thread, Lock
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooMachineError, CuckooGuestError
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.objects import File
from lib.cuckoo.common.utils import create_folder
from lib.cuckoo.core.database import Database, TASK_COMPLETED, TASK_REPORTED
from lib.cuckoo.core.guest import GuestManager
from lib.cuckoo.core.plugins import list_plugins, RunAuxiliary, RunProcessing
from lib.cuckoo.core.plugins import RunSignatures, RunReporting
from lib.cuckoo.core.resultserver import ResultServer
log = logging.getLogger(__name__)
machinery = None
machine_lock = Lock()
latest_symlink_lock = Lock()
active_analysis_count = 0
class CuckooDeadMachine(Exception):
"""Exception thrown when a machine turns dead.
When this exception has been thrown, the analysis task will start again,
and will try to use another machine, when available.
"""
pass
class AnalysisManager(Thread):
"""Analysis Manager.
This class handles the full analysis process for a given task. It takes
care of selecting the analysis machine, preparing the configuration and
interacting with the guest agent and analyzer components to launch and
complete the analysis and store, process and report its results.
"""
def __init__(self, task, error_queue):
"""@param task: task object containing the details for the analysis."""
Thread.__init__(self)
Thread.daemon = True
self.task = task
self.errors = error_queue
self.cfg = Config()
self.storage = ""
self.binary = ""
self.machine = None
def init_storage(self):
"""Initialize analysis storage folder."""
self.storage = os.path.join(CUCKOO_ROOT,
"storage",
"analyses",
str(self.task.id))
# If the analysis storage folder already exists, we need to abort the
# analysis or previous results will be overwritten and lost.
if os.path.exists(self.storage):
log.error("Analysis results folder already exists at path \"%s\","
" analysis aborted", self.storage)
return False
# If we're not able to create the analysis storage folder, we have to
# abort the analysis.
try:
create_folder(folder=self.storage)
except CuckooOperationalError:
log.error("Unable to create analysis folder %s", self.storage)
return False
return True
def check_file(self):
"""Checks the integrity of the file to be analyzed."""
sample = Database().view_sample(self.task.sample_id)
sha256 = File(self.task.target).get_sha256()
if sha256 != sample.sha256:
log.error("Target file has been modified after submission: \"%s\"", self.task.target)
return False
return True
def store_file(self):
"""Store a copy of the file being analyzed."""
if not os.path.exists(self.task.target):
log.error("The file to analyze does not exist at path \"%s\", "
"analysis aborted", self.task.target)
return False
sha256 = File(self.task.target).get_sha256()
self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)
if os.path.exists(self.binary):
log.info("File already exists at \"%s\"", self.binary)
else:
# TODO: do we really need to abort the analysis in case we are not
# able to store a copy of the file?
try:
shutil.copy(self.task.target, self.binary)
except (IOError, shutil.Error) as e:
log.error("Unable to store file from \"%s\" to \"%s\", "
"analysis aborted", self.task.target, self.binary)
return False
try:
new_binary_path = os.path.join(self.storage, "binary")
if hasattr(os, "symlink"):
os.symlink(self.binary, new_binary_path)
else:
shutil.copy(self.binary, new_binary_path)
except (AttributeError, OSError) as e:
log.error("Unable to create symlink/copy from \"%s\" to "
"\"%s\": %s", self.binary, self.storage, e)
return True
def acquire_machine(self):
"""Acquire an analysis machine from the pool of available ones."""
machine = None
# Start a loop to acquire a machine to run the analysis on.
while True:
machine_lock.acquire()
# In some cases it's possible that we enter this loop without
# having any available machines. We should make sure this is not
# such a case, or the analysis task will fail completely.
if not machinery.availables():
machine_lock.release()
time.sleep(1)
continue
# If the user specified a specific machine ID, a platform to be
# used or machine tags, acquire the machine accordingly.
try:
machine = machinery.acquire(machine_id=self.task.machine,
platform=self.task.platform,
tags=self.task.tags)
finally:
machine_lock.release()
# If no machine is available at this moment, wait for one second
# and try again.
if not machine:
log.debug("Task #%d: no machine available yet", self.task.id)
time.sleep(1)
else:
log.info("Task #%d: acquired machine %s (label=%s)",
self.task.id, machine.name, machine.label)
break
self.machine = machine
def build_options(self):
"""Generate analysis options.
@return: options dict.
"""
options = {}
options["id"] = self.task.id
options["ip"] = self.machine.resultserver_ip
options["port"] = self.machine.resultserver_port
options["category"] = self.task.category
options["target"] = self.task.target
options["package"] = self.task.package
options["options"] = self.task.options
options["enforce_timeout"] = self.task.enforce_timeout
options["clock"] = self.task.clock
options["terminate_processes"] = self.cfg.cuckoo.terminate_processes
if not self.task.timeout or self.task.timeout == 0:
options["timeout"] = self.cfg.timeouts.default
else:
options["timeout"] = self.task.timeout
if self.task.category == "file":
options["file_name"] = File(self.task.target).get_name()
options["file_type"] = File(self.task.target).get_type()
return options
def launch_analysis(self):
"""Start analysis."""
succeeded = False
dead_machine = False
log.info("Starting analysis of %s \"%s\" (task=%d)",
self.task.category.upper(), self.task.target, self.task.id)
# Initialize the analysis folders.
if not self.init_storage():
return False
if self.task.category == "file":
# Check whether the file has been changed for some unknown reason.
# And fail this analysis if it has been modified.
if not self.check_file():
return False
# Store a copy of the original file.
if not self.store_file():
return False
# Acquire analysis machine.
try:
self.acquire_machine()
except CuckooOperationalError as e:
log.error("Cannot acquire machine: {0}".format(e))
return False
# Generate the analysis configuration file.
options = self.build_options()
# At this point we can tell the ResultServer about it.
try:
ResultServer().add_task(self.task, self.machine)
except Exception as e:
machinery.release(self.machine.label)
self.errors.put(e)
aux = RunAuxiliary(task=self.task, machine=self.machine)
aux.start()
try:
# Mark the selected analysis machine in the database as started.
guest_log = Database().guest_start(self.task.id,
self.machine.name,
self.machine.label,
machinery.__class__.__name__)
# Start the machine.
machinery.start(self.machine.label)
# Initialize the guest manager.
guest = GuestManager(self.machine.name, self.machine.ip,
self.machine.platform)
# Start the analysis.
guest.start_analysis(options)
guest.wait_for_completion()
succeeded = True
except CuckooMachineError as e:
log.error(str(e), extra={"task_id": self.task.id})
dead_machine = True
except CuckooGuestError as e:
log.error(str(e), extra={"task_id": self.task.id})
finally:
# Stop Auxiliary modules.
aux.stop()
# Take a memory dump of the machine before shutting it off.
if self.cfg.cuckoo.memory_dump or self.task.memory:
try:
dump_path = os.path.join(self.storage, "memory.dmp")
machinery.dump_memory(self.machine.label, dump_path)
except NotImplementedError:
log.error("The memory dump functionality is not available "
"for the current machine manager.")
except CuckooMachineError as e:
log.error(e)
try:
# Stop the analysis machine.
machinery.stop(self.machine.label)
except CuckooMachineError as e:
log.warning("Unable to stop machine %s: %s",
self.machine.label, e)
# Mark the machine in the database as stopped. Unless this machine
# has been marked as dead, we just keep it as "started" in the
# database so it'll not be used later on in this session.
Database().guest_stop(guest_log)
# After all this, we can make the ResultServer forget about the
# internal state for this analysis task.
ResultServer().del_task(self.task, self.machine)
if dead_machine:
# Remove the guest from the database, so that we can assign a
# new guest when the task is being analyzed with another
# machine.
Database().guest_remove(guest_log)
# Remove the analysis directory that has been created so
# far, as launch_analysis() is going to be doing that again.
shutil.rmtree(self.storage)
# This machine has turned dead, so we throw an exception here
# which informs the AnalysisManager that it should analyze
# this task again with another available machine.
raise CuckooDeadMachine()
try:
# Release the analysis machine. But only if the machine has
# not turned dead yet.
machinery.release(self.machine.label)
except CuckooMachineError as e:
log.error("Unable to release machine %s, reason %s. "
"You might need to restore it manually.",
self.machine.label, e)
return succeeded
def process_results(self):
"""Process the analysis results and generate the enabled reports."""
results = RunProcessing(task_id=self.task.id).run()
RunSignatures(results=results).run()
RunReporting(task_id=self.task.id, results=results).run()
# If the target is a file and the user enabled the option,
# delete the original copy.
if self.task.category == "file" and self.cfg.cuckoo.delete_original:
if not os.path.exists(self.task.target):
log.warning("Original file does not exist anymore: \"%s\": "
"File not found.", self.task.target)
else:
try:
os.remove(self.task.target)
except OSError as e:
log.error("Unable to delete original file at path "
"\"%s\": %s", self.task.target, e)
# If the target is a file and the user enabled the delete copy of
# the binary option, then delete the copy.
if self.task.category == "file" and self.cfg.cuckoo.delete_bin_copy:
if not os.path.exists(self.binary):
log.warning("Copy of the original file does not exist anymore: \"%s\": File not found", self.binary)
else:
try:
os.remove(self.binary)
except OSError as e:
log.error("Unable to delete the copy of the original file at path \"%s\": %s", self.binary, e)
log.info("Task #%d: reports generation completed (path=%s)",
self.task.id, self.storage)
return True
def run(self):
"""Run manager thread."""
global active_analysis_count
active_analysis_count += 1
try:
while True:
try:
success = self.launch_analysis()
except CuckooDeadMachine:
continue
break
Database().set_status(self.task.id, TASK_COMPLETED)
log.debug("Released database task #%d with status %s",
self.task.id, success)
if self.cfg.cuckoo.process_results:
self.process_results()
Database().set_status(self.task.id, TASK_REPORTED)
# We make a symbolic link ("latest") which links to the latest
# analysis - this is useful for debugging purposes. This is only
# supported under systems that support symbolic links.
if hasattr(os, "symlink"):
latest = os.path.join(CUCKOO_ROOT, "storage",
"analyses", "latest")
# First we have to remove the existing symbolic link, then we
# have to create the new one.
# Deal with race conditions using a lock.
latest_symlink_lock.acquire()
try:
if os.path.exists(latest):
os.remove(latest)
os.symlink(self.storage, latest)
except OSError as e:
log.warning("Error pointing latest analysis symlink: %s" % e)
finally:
latest_symlink_lock.release()
log.info("Task #%d: analysis procedure completed", self.task.id)
except:
log.exception("Failure in AnalysisManager.run")
active_analysis_count -= 1
class Scheduler:
"""Tasks Scheduler.
This class is responsible for the main execution loop of the tool. It
prepares the analysis machines and keep waiting and loading for new
analysis tasks.
Whenever a new task is available, it launches AnalysisManager which will
take care of running the full analysis process and operating with the
assigned analysis machine.
"""
def __init__(self, maxcount=None):
self.running = True
self.cfg = Config()
self.db = Database()
self.maxcount = maxcount
self.total_analysis_count = 0
def initialize(self):
"""Initialize the machine manager."""
global machinery
machinery_name = self.cfg.cuckoo.machinery
log.info("Using \"%s\" machine manager", machinery_name)
# Get registered class name. Only one machine manager is imported,
# therefore there should be only one class in the list.
plugin = list_plugins("machinery")[0]
# Initialize the machine manager.
machinery = plugin()
# Find its configuration file.
conf = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)
if not os.path.exists(conf):
raise CuckooCriticalError("The configuration file for machine "
"manager \"{0}\" does not exist at path:"
" {1}".format(machinery_name, conf))
# Provide a dictionary with the configuration options to the
# machine manager instance.
machinery.set_options(Config(machinery_name))
# Initialize the machine manager.
try:
machinery.initialize(machinery_name)
except CuckooMachineError as e:
raise CuckooCriticalError("Error initializing machines: %s" % e)
# At this point all the available machines should have been identified
# and added to the list. If none were found, Cuckoo needs to abort the
# execution.
if not len(machinery.machines()):
raise CuckooCriticalError("No machines available.")
else:
log.info("Loaded %s machine/s", len(machinery.machines()))
if len(machinery.machines()) > 1 and self.db.engine.name == "sqlite":
log.warning("As you've configured Cuckoo to execute parallel "
"analyses, we recommend you to switch to a MySQL "
"a PostgreSQL database as SQLite might cause some "
"issues.")
if len(machinery.machines()) > 4 and self.cfg.cuckoo.process_results:
log.warning("When running many virtual machines it is recommended "
"to process the results in a separate process.py to "
"increase throughput and stability. Please read the "
"documentation about the `Processing Utility`.")
def stop(self):
"""Stop scheduler."""
self.running = False
# Shutdown machine manager (used to kill machines that still alive).
machinery.shutdown()
def start(self):
"""Start scheduler."""
self.initialize()
log.info("Waiting for analysis tasks.")
# Message queue with threads to transmit exceptions (used as IPC).
errors = Queue.Queue()
# Command-line overrides the configuration file.
if self.maxcount is None:
self.maxcount = self.cfg.cuckoo.max_analysis_count
# This loop runs forever.
while self.running:
time.sleep(1)
# If not enough free disk space is available, then we print an
# error message and wait another round (this check is ignored
# when the freespace configuration variable is set to zero).
if self.cfg.cuckoo.freespace:
# Resolve the full base path to the analysis folder, just in
# case somebody decides to make a symbolic link out of it.
dir_path = os.path.join(CUCKOO_ROOT, "storage", "analyses")
# TODO: Windows support
if hasattr(os, "statvfs"):
dir_stats = os.statvfs(dir_path)
# Calculate the free disk space in megabytes.
space_available = dir_stats.f_bavail * dir_stats.f_frsize
space_available /= 1024 * 1024
if space_available < self.cfg.cuckoo.freespace:
log.error("Not enough free disk space! (Only %d MB!)",
space_available)
continue
# Have we limited the number of concurrently executing machines?
if self.cfg.cuckoo.max_machines_count > 0:
# Are too many running?
if len(machinery.running()) >= self.cfg.cuckoo.max_machines_count:
continue
# If no machines are available, it's pointless to fetch for
# pending tasks. Loop over.
if not machinery.availables():
continue
# Exits if max_analysis_count is defined in the configuration
# file and has been reached.
if self.maxcount and self.total_analysis_count >= self.maxcount:
if active_analysis_count <= 0:
self.stop()
else:
# Fetch a pending analysis task.
#TODO: this fixes only submissions by --machine, need to add other attributes (tags etc.)
for machine in self.db.get_available_machines():
task = self.db.fetch(machine=machine.name)
if task:
log.debug("Processing task #%s", task.id)
self.total_analysis_count += 1
# Initialize and start the analysis manager.
analysis = AnalysisManager(task, errors)
analysis.start()
# Deal with errors.
try:
raise errors.get(block=False)
except Queue.Empty:
pass
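# --- Illustrative usage sketch (added; not part of the original module) ---
# Roughly how Cuckoo's entry point drives this scheduler (simplified; the real
# cuckoo.py also handles logging, signals and command-line options).
if __name__ == "__main__":
    sched = Scheduler(maxcount=None)
    try:
        sched.start()  # blocks, looping over pending tasks
    except KeyboardInterrupt:
        sched.stop()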
| liorvh/CuckooSploit | lib/cuckoo/core/scheduler.py | Python | gpl-3.0 | 21,948 | 0.000456 |
#
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import tempfile
from testlib import VdsmTestCase
from testlib import permutations, expandPermutations
from vdsm import osinfo
@expandPermutations
class TestOsinfo(VdsmTestCase):
@permutations([
[b'', ''],
[b'\n', ''],
[b'a', 'a'],
[b'a\n', 'a'],
[b'a\nb', 'a']
])
def test_kernel_args(self, test_input, expected_result):
with tempfile.NamedTemporaryFile() as f:
f.write(test_input)
f.flush()
self.assertEqual(osinfo.kernel_args(f.name),
expected_result)
| EdDev/vdsm | tests/osinfo_test.py | Python | gpl-2.0 | 1,439 | 0 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import sys
from pyface.action.action import Action
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
from pyface.message_dialog import information
from pyface.tasks.action.task_action import TaskAction
from pyface.tasks.task_window_layout import TaskWindowLayout
from traits.api import Any, List
from pychron.envisage.resources import icon
from pychron.envisage.ui_actions import UIAction, UITaskAction
# ===============================================================================
# help
# ===============================================================================
# from pychron.envisage.user_login import login_file
def restart():
os.execl(sys.executable, *([sys.executable] + sys.argv))
def get_key_binding(k_id):
from pychron.envisage.key_bindings import user_key_map
try:
return user_key_map[k_id][0]
except KeyError:
pass
class myTaskAction(TaskAction):
task_ids = List
def _task_changed(self):
if self.task:
if self.task.id in self.task_ids:
enabled = True
if self.enabled_name:
if self.object:
enabled = bool(
self._get_attr(self.object, self.enabled_name, False)
)
if enabled:
self._enabled = True
else:
self._enabled = False
def _enabled_update(self):
"""
reimplement ListeningAction's _enabled_update
"""
if self.enabled_name:
if self.object:
self.enabled = bool(
self._get_attr(self.object, self.enabled_name, False)
)
else:
self.enabled = False
elif self._enabled is not None:
self.enabled = self._enabled
else:
self.enabled = bool(self.object)
class PAction(UIAction):
def __init__(self, *args, **kw):
super(PAction, self).__init__(*args, **kw)
acc = get_key_binding(self.id)
self.accelerator = acc or self.accelerator
class PTaskAction(UITaskAction):
def __init__(self, *args, **kw):
super(PTaskAction, self).__init__(*args, **kw)
acc = get_key_binding(self.id)
self.accelerator = acc or self.accelerator
class DemoAction(Action):
name = "Demo"
accelerator = "Shift+Ctrl+0"
def perform(self, event):
app = event.task.application
app.info("Demo message: {}".format("Hello version 2.0"))
class StartupTestsAction(Action):
name = "Run Startup Tests"
def perform(self, event):
app = event.task.application
app.do_startup_tests(
force_show_results=True, cancel_auto_close=True, can_cancel=False
)
class KeyBindingsAction(PAction):
name = "Edit Key Bindings"
def perform(self, event):
from pychron.envisage.key_bindings import edit_key_bindings
edit_key_bindings()
class UserAction(PAction):
def _get_current_user(self, event):
app = event.task.application
args = app.id.split(".")
cuser = args[-1]
base_id = ".".join(args[:-1])
return base_id, cuser
class SwitchUserAction(UserAction):
name = "Switch User"
image = icon("user_suit")
def perform(self, event):
pass
# from pychron.envisage.user_login import get_user
#
# base_id, cuser = self._get_current_user(event)
# user = get_user(current=cuser)
# if user:
# # from pychron.paths import paths
# # set login file
# with open(login_file, 'w') as wfile:
# wfile.write(user)
# restart()
class CopyPreferencesAction(UserAction):
name = "Copy Preferences"
def perform(self, event):
pass
# from pychron.envisage.user_login import get_src_dest_user
#
# base_id, cuser = self._get_current_user(event)
# src_name, dest_names = get_src_dest_user(cuser)
#
# if src_name:
#
# for di in dest_names:
# dest_id = '{}.{}'.format(base_id, di)
# src_id = '{}.{}'.format(base_id, src_name)
#
# root = os.path.join(os.path.expanduser('~'), '.enthought')
#
# src_dir = os.path.join(root, src_id)
# dest_dir = os.path.join(root, dest_id)
# if not os.path.isdir(dest_dir):
# os.mkdir(dest_dir)
#
# name = 'preferences.ini'
# dest = os.path.join(dest_dir, name)
# src = os.path.join(src_dir, name)
# shutil.copyfile(src, dest)
class RestartAction(PAction):
name = "Restart"
image = icon("system-restart")
def perform(self, event):
restart()
class WebAction(PAction):
def _open_url(self, url):
import webbrowser
import requests
try:
requests.get(url)
except BaseException as e:
print("web action url:{} exception:{}".format(url, e))
return
webbrowser.open_new(url)
return True
class IssueAction(WebAction):
name = "Add Request/Report Bug"
image = icon("bug")
def perform(self, event):
"""
go to the issues page to add a request or report a bug
"""
app = event.task.window.application
name = app.preferences.get("pychron.general.organization")
if not name:
information(
event.task.window.control,
'Please set an "Organziation" in General Preferences',
)
return
url = "https://github.com/{}/pychron/issues/new".format(name)
self._open_url(url)
class SettingsAction(Action):
def perform(self, event):
app = event.task.window.application
name = app.preferences.get("pychron.general.remote")
if not name:
information(
event.task.window.control,
'Please set an "Laboratory Repo" in General Preferences',
)
return
from pychron.envisage.settings_repo import SettingsRepoManager
from pychron.paths import paths
root = os.path.join(paths.root_dir, ".lab")
exists = os.path.isdir(os.path.join(root, ".git"))
if exists:
repo = SettingsRepoManager()
repo.path = root
repo.open_repo(root)
repo.pull()
else:
url = "https://github.com/{}".format(name)
repo = SettingsRepoManager.clone_from(url, root)
self._perform(repo)
def _perform(self, repo):
raise NotImplementedError
class ApplySettingsAction(SettingsAction):
name = "Apply Settings..."
def _perform(self, repo):
"""
select and apply settings from the laboratory's repository
:param repo:
:return:
"""
repo.apply_settings()
class ShareSettingsAction(SettingsAction):
name = "Share Settings..."
def _perform(self, repo):
"""
save current settings to the laboratory's repository
:param repo:
:return:
"""
repo.share_settings()
class NoteAction(WebAction):
name = "Add Laboratory Note"
image = icon("insert-comment")
def perform(self, event):
"""
go to the laboratory repository issues page to add a note
"""
app = event.task.window.application
name = app.preferences.get("pychron.general.remote")
if not name:
information(
event.task.window.control,
'Please set an "Laboratory Repo" in General Preferences',
)
return
url = "https://github.com/{}/issues/new".format(name)
self._open_url(url)
class DocumentationAction(WebAction):
name = "View Documentation"
image = icon("documentation")
def perform(self, event):
"""
open the online documentation
"""
url = "http://pychron.readthedocs.org/en/latest/index.html"
self._open_url(url)
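# --- Illustrative sketch (added; not part of the original module) ---
# New web shortcuts follow the same pattern as the actions above; the name and
# URL here are made-up examples.
class ExampleSiteAction(WebAction):
    name = "View Example Site"
    image = icon("documentation")
    def perform(self, event):
        self._open_url("https://example.org/docs")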
class WaffleAction(WebAction):
name = "View Waffle Board"
image = icon("waffle")
def perform(self, event):
"""
go to the project's Waffle board
"""
url = "https://waffle.io/NMGRL/pychron"
self._open_url(url)
class ChangeLogAction(WebAction):
name = "What's New"
image = icon("documentation")
description = "View changelog"
def perform(self, event):
"""
open the changelog for the current release
"""
from pychron.version import __version__
app = event.task.window.application
org = app.preferences.get("pychron.general.organization")
url = "https://github.com/{}/pychron/blob/release/v{}/CHANGELOG.md".format(
org, __version__
)
if not self._open_url(url):
url = "https://github.com/{}/pychron/blob/develop/CHANGELOG.md".format(org)
self._open_url(url)
class AboutAction(PAction):
name = "About Pychron"
def perform(self, event):
app = event.task.window.application
app.about()
class ResetLayoutAction(PTaskAction):
name = "Reset Layout"
image = icon("view-restore")
def perform(self, event):
self.task.window.reset_layout()
class PositionAction(PAction):
name = "Window Positions"
image = icon("window-new")
def perform(self, event):
from pychron.envisage.tasks.layout_manager import LayoutManager
app = event.task.window.application
lm = LayoutManager(app)
lm.edit_traits()
class MinimizeAction(PTaskAction):
name = "Minimize"
accelerator = "Ctrl+m"
def perform(self, event):
app = self.task.window.application
app.active_window.control.showMinimized()
class CloseAction(PTaskAction):
name = "Close"
accelerator = "Ctrl+W"
def perform(self, event):
ok = YES
if len(self.task.window.application.windows) == 1:
ok = confirm(self.task.window.control, message="Quit Pychron?")
if ok == YES:
self.task.window.close()
class CloseOthersAction(PTaskAction):
name = "Close others"
accelerator = "Ctrl+Shift+W"
def perform(self, event):
win = self.task.window
for wi in self.task.window.application.windows:
if wi != win:
wi.close()
class OpenAdditionalWindow(PTaskAction):
name = "Open Additional Window"
description = "Open an additional window of the current active task"
def perform(self, event):
app = self.task.window.application
win = app.create_window(TaskWindowLayout(self.task.id))
win.open()
class RaiseAction(PTaskAction):
window = Any
style = "toggle"
def perform(self, event):
self.window.activate()
self.checked = True
# @on_trait_change('window:deactivated')
# def _on_deactivate(self):
# self.checked = False
class RaiseUIAction(PTaskAction):
style = "toggle"
def perform(self, event):
self.checked = True
class GenericSaveAction(PTaskAction):
name = "Save"
accelerator = "Ctrl+S"
image = icon("document-save")
def perform(self, event):
task = self.task
if hasattr(task, "save"):
task.save()
class GenericSaveAsAction(PTaskAction):
name = "Save As..."
accelerator = "Ctrl+Shift+S"
image = icon("document-save-as")
def perform(self, event):
task = self.task
if hasattr(task, "save_as"):
task.save_as()
class GenericFindAction(PTaskAction):
accelerator = "Ctrl+F"
name = "Find text..."
def perform(self, event):
task = self.task
if hasattr(task, "find"):
task.find()
class FileOpenAction(PAction):
task_id = ""
test_path = ""
image = icon("document-open")
def perform(self, event):
if event.task.id == self.task_id:
task = event.task
task.open()
else:
application = event.task.window.application
win = application.create_window(TaskWindowLayout(self.task_id))
task = win.active_task
if task.open(path=self.test_path):
win.open()
class NewAction(PAction):
task_id = ""
def perform(self, event):
if event.task.id == self.task_id:
task = event.task
task.new()
else:
application = event.task.window.application
win = application.create_window(TaskWindowLayout(self.task_id))
task = win.active_task
if task.new():
win.open()
# class GenericReplaceAction(TaskAction):
# pass
# else:
# manager = self._get_experimentor(event)
# manager.save_as_experiment_queues()
class ToggleFullWindowAction(TaskAction):
name = "Toggle Full Window"
method = "toggle_full_window"
image = icon("view-fullscreen-8")
class EditInitializationAction(Action):
name = "Edit Initialization"
image = icon("brick-edit")
def perform(self, event):
from pychron.envisage.initialization.initialization_edit_view import (
edit_initialization,
)
if edit_initialization():
restart()
class EditTaskExtensionsAction(Action):
name = "Edit UI..."
def perform(self, event):
app = event.task.window.application
from pychron.envisage.task_extensions import edit_task_extensions
if edit_task_extensions(app.available_task_extensions):
restart()
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/envisage/tasks/actions.py | Python | apache-2.0 | 14,692 | 0.000204 |
import unittest
from mock import Mock, patch
from expyrimenter import Executor
from expyrimenter.runnable import Runnable
from subprocess import CalledProcessError
from concurrent.futures import ThreadPoolExecutor
import re
class TestExecutor(unittest.TestCase):
output = 'TestExecutor output'
outputs = ['TestExecutor 1', 'TestExecutor 2']
def test_runnable_output(self):
executor = Executor()
with patch.object(Runnable, 'run', return_value=TestExecutor.output):
executor.run(Runnable())
executor.wait()
results = executor.results
self.assertEqual(1, len(results))
self.assertEqual(TestExecutor.output, results[0])
def test_runnable_outputs(self):
executor = Executor()
runnable = Runnable()
with patch.object(Runnable, 'run', side_effect=TestExecutor.outputs):
executor.run(runnable)
executor.run(runnable)
executor.wait()
results = executor.results
self.assertListEqual(TestExecutor.outputs, results)
def test_function_output(self):
executor = Executor()
executor.run_function(background_function)
executor.wait()
output = executor.results[0]
self.assertEqual(TestExecutor.output, output)
def test_function_outputs(self):
executor = Executor()
runnable = Runnable()
with patch.object(Runnable, 'run', side_effect=TestExecutor.outputs):
executor.run(runnable)
executor.run(runnable)
executor.wait()
results = executor.results
self.assertListEqual(TestExecutor.outputs, results)
def test_against_runnable_memory_leak(self):
executor = Executor()
with patch.object(Runnable, 'run'):
executor.run(Runnable())
executor.wait()
self.assertEqual(0, len(executor._future_runnables))
def test_against_function_memory_leak(self):
executor = Executor()
executor.run_function(background_function)
executor.wait()
self.assertEqual(0, len(executor._function_titles))
def test_if_shutdown_shutdowns_executor(self):
executor = Executor()
executor._executor = Mock()
executor.shutdown()
executor._executor.shutdown.called_once_with()
def test_if_shutdown_clears_function_resources(self):
executor = Executor()
executor._function_titles = Mock()
executor.shutdown()
executor._function_titles.clear.assert_called_once_with()
def test_if_shutdown_clears_runnable_resources(self):
executor = Executor()
executor._future_runnables = Mock()
executor.shutdown()
executor._future_runnables.clear.assert_called_once_with()
def test_exception_logging(self):
executor = Executor()
executor._log = Mock()
with patch.object(Runnable, 'run', side_effect=Exception):
executor.run(Runnable)
executor.wait()
self.assertEqual(1, executor._log.error.call_count)
@patch.object(ThreadPoolExecutor, '__init__', return_value=None)
def test_specified_max_workers(self, pool_mock):
max = 42
Executor(max)
pool_mock.assert_called_once_with(42)
def test_calledprocesserror_logging(self):
executor = Executor()
executor._log = Mock()
exception = CalledProcessError(returncode=1, cmd='command')
with patch.object(Runnable, 'run', side_effect=exception):
executor.run(Runnable)
executor.wait()
self.assertEqual(1, executor._log.error.call_count)
def test_if_logged_title_is_hidden_if_it_equals_command(self):
command = 'command'
runnable = Runnable()
runnable.title = command
exception = CalledProcessError(returncode=1, cmd=command)
runnable.run = Mock(side_effect=exception)
executor = Executor()
executor._log = Mock()
executor.run(runnable)
executor.wait()
executor._log.error.assert_called_once_with(Matcher(has_not_title))
def test_logged_title_when_it_differs_from_command(self):
command, title = 'command', 'title'
runnable = Runnable()
runnable.title = title
exception = CalledProcessError(returncode=1, cmd=command)
runnable.run = Mock(side_effect=exception)
executor = Executor()
executor._log = Mock()
executor.run(runnable)
executor.wait()
executor._log.error.assert_called_once_with(Matcher(has_title))
def has_title(msg):
return re.match("(?ims).*Title", msg) is not None
def has_not_title(msg):
return re.match("(?ims).*Title", msg) is None
class Matcher:
def __init__(self, compare):
self.compare = compare
def __eq__(self, msg):
return self.compare(msg)
def background_function():
return TestExecutor.output
if __name__ == '__main__':
unittest.main()
| cemsbr/expyrimenter | tests/test_executor.py | Python | gpl-3.0 | 4,989 | 0.0002 |
from __future__ import absolute_import
import os
import subprocess
import sys
import tarfile
import tempfile
import traceback
import zipfile
from threading import Thread
import six
from django.utils.text import get_valid_filename
from django.core.files import File
from django.conf import settings
from celery import Task
from celery import app
from celery.schedules import crontab
from celery.signals import worker_process_init
from .backend import utils
from . import settings as wooey_settings
try:
from Queue import Empty, Queue
except ImportError:
from queue import Empty, Queue # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
celery_app = app.app_or_default()
def enqueue_output(out, q):
for line in iter(out.readline, b''):
q.put(line.decode('utf-8'))
try:
out.close()
except IOError:
pass
def output_monitor_queue(queue, out):
p = Thread(target=enqueue_output, args=(out, queue))
p.start()
return p
def update_from_output_queue(queue, out):
lines = []
while True:
try:
line = queue.get_nowait()
lines.append(line)
except Empty:
break
out += ''.join(map(str, lines))
return out
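# --- Illustrative sketch (added; not part of the original module) ---
# How the three helpers above are combined by submit_script below: stdout and
# stderr each get a Queue plus a reader thread, and the caller polls both
# queues without blocking. The 'echo' command is only a stand-in example.
def _demo_capture():
    proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, bufsize=0)
    qout, qerr = Queue(), Queue()
    pout = output_monitor_queue(qout, proc.stdout)
    perr = output_monitor_queue(qerr, proc.stderr)
    stdout = stderr = ''
    while proc.poll() is None or pout.is_alive() or perr.is_alive():
        stdout = update_from_output_queue(qout, stdout)
        stderr = update_from_output_queue(qerr, stderr)
    return stdout, stderr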
@worker_process_init.connect
def configure_workers(*args, **kwargs):
# this sets up Django on nodes started by the worker daemon.
import django
django.setup()
class WooeyTask(Task):
pass
# def after_return(self, status, retval, task_id, args, kwargs, einfo):
# job, created = WooeyJob.objects.get_or_create(wooey_celery_id=task_id)
# job.content_type.wooey_celery_state = status
# job.save()
def get_latest_script(script_version):
"""Downloads the latest script version to the local storage.
:param script_version: :py:class:`~wooey.models.core.ScriptVersion`
:return: boolean
Returns true if a new version was downloaded.
"""
script_path = script_version.script_path
local_storage = utils.get_storage(local=True)
script_exists = local_storage.exists(script_path.name)
if not script_exists:
local_storage.save(script_path.name, script_path.file)
return True
else:
# If script exists, make sure the version is valid, otherwise fetch a new one
script_contents = local_storage.open(script_path.name).read()
script_checksum = utils.get_checksum(buff=script_contents)
if script_checksum != script_version.checksum:
tf = tempfile.TemporaryFile()
with tf:
tf.write(script_contents)
tf.seek(0)
local_storage.delete(script_path.name)
local_storage.save(script_path.name, tf)
return True
return False
@celery_app.task(base=WooeyTask)
def submit_script(**kwargs):
job_id = kwargs.pop('wooey_job')
resubmit = kwargs.pop('wooey_resubmit', False)
from .models import WooeyJob, UserFile
job = WooeyJob.objects.get(pk=job_id)
stdout, stderr = '', ''
try:
command = utils.get_job_commands(job=job)
if resubmit:
# clone ourselves, setting pk=None seems hackish but it works
job.pk = None
# This is where the script works from -- it is what is after the media_root since that may change between
# setups/where our user uploads are stored.
cwd = job.get_output_path()
abscwd = os.path.abspath(os.path.join(settings.MEDIA_ROOT, cwd))
job.command = ' '.join(command)
job.save_path = cwd
utils.mkdirs(abscwd)
# make sure we have the script, otherwise download it. This can happen if we have an ephemeral file system or are
# executing jobs on a worker node.
get_latest_script(job.script_version)
job.status = WooeyJob.RUNNING
job.save()
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=abscwd, bufsize=0)
# We need to read stdout and stderr in separate threads, otherwise they will block one another
# i.e. a blocking read on stderr will sit waiting on stderr before returning
# we use Queues to communicate with the reader threads
qout, qerr = Queue(), Queue()
pout = output_monitor_queue(qout, proc.stdout)
perr = output_monitor_queue(qerr, proc.stderr)
prev_std = None
def check_output(job, stdout, stderr, prev_std):
# Check for updates from either (non-blocking)
stdout = update_from_output_queue(qout, stdout)
stderr = update_from_output_queue(qerr, stderr)
# If there are changes, update the db
if (stdout, stderr) != prev_std:
job.update_realtime(stdout=stdout, stderr=stderr)
prev_std = (stdout, stderr)
return stdout, stderr, prev_std
# Loop until the process is complete + both stdout/stderr have EOFd
while proc.poll() is None or pout.is_alive() or perr.is_alive():
stdout, stderr, prev_std = check_output(job, stdout, stderr, prev_std)
# Catch any remaining output
try:
proc.stdout.flush()
except ValueError: # Handle if stdout is closed
pass
stdout, stderr, prev_std = check_output(job, stdout, stderr, prev_std)
return_code = proc.returncode
# tar/zip up the generated content for bulk downloads
def get_valid_file(cwd, name, ext):
out = os.path.join(cwd, name)
index = 0
while os.path.exists(six.u('{}.{}').format(out, ext)):
index += 1
out = os.path.join(cwd, six.u('{}_{}').format(name, index))
return six.u('{}.{}').format(out, ext)
# fetch the job again in case the database connection was lost during the job or something else changed.
job = WooeyJob.objects.get(pk=job_id)
# if there are files generated, make zip/tar files for download
if len(os.listdir(abscwd)):
tar_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'tar.gz')
tar = tarfile.open(tar_out, "w:gz")
tar_name = os.path.splitext(os.path.splitext(os.path.split(tar_out)[1])[0])[0]
tar.add(abscwd, arcname=tar_name)
tar.close()
zip_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'zip')
zip = zipfile.ZipFile(zip_out, "w")
arcname = os.path.splitext(os.path.split(zip_out)[1])[0]
zip.write(abscwd, arcname=arcname)
base_dir = os.path.split(zip_out)[0]
for root, folders, filenames in os.walk(base_dir):
for filename in filenames:
path = os.path.join(root, filename)
archive_name = path.replace(base_dir, '')
if archive_name.startswith(os.path.sep):
archive_name = archive_name.replace(os.path.sep, '', 1)
archive_name = os.path.join(arcname, archive_name)
if path == tar_out:
continue
if path == zip_out:
continue
try:
zip.write(path, arcname=archive_name)
except:
stderr = '{}\n{}'.format(stderr, traceback.format_exc())
try:
zip.close()
except:
stderr = '{}\n{}'.format(stderr, traceback.format_exc())
# save all the files generated as well to our default storage for ephemeral storage setups
if wooey_settings.WOOEY_EPHEMERAL_FILES:
for root, folders, files in os.walk(abscwd):
for filename in files:
filepath = os.path.join(root, filename)
s3path = os.path.join(root[root.find(cwd):], filename)
remote = utils.get_storage(local=False)
exists = remote.exists(s3path)
filesize = remote.size(s3path) if exists else 0
if not exists or (exists and filesize == 0):
if exists:
remote.delete(s3path)
remote.save(s3path, File(open(filepath, 'rb')))
utils.create_job_fileinfo(job)
job.status = WooeyJob.COMPLETED if return_code == 0 else WooeyJob.FAILED
job.update_realtime(delete=True)
except Exception:
stderr = '{}\n{}'.format(stderr, traceback.format_exc())
job.status = WooeyJob.ERROR
job.stdout = stdout
job.stderr = stderr
job.save()
return (stdout, stderr)
@celery_app.task(base=WooeyTask)
def cleanup_wooey_jobs(**kwargs):
from django.utils import timezone
from .models import WooeyJob
cleanup_settings = wooey_settings.WOOEY_JOB_EXPIRATION
anon_settings = cleanup_settings.get('anonymous')
now = timezone.now()
if anon_settings:
WooeyJob.objects.filter(user=None, created_date__lte=now-anon_settings).delete()
user_settings = cleanup_settings.get('user')
if user_settings:
WooeyJob.objects.filter(user__isnull=False, created_date__lte=now-user_settings).delete()
@celery_app.task(base=WooeyTask)
def cleanup_dead_jobs():
"""
This cleans up jobs that have been marked as running but are not queued in Celery. It is
meant to clean up jobs that have been lost due to a server crash or some other event that
leaves a job in limbo.
"""
from .models import WooeyJob
# Get active tasks from Celery
inspect = celery_app.control.inspect()
worker_info = inspect.active()
# If we cannot connect to the workers, we do not know if the tasks are running or not, so
# we cannot mark them as dead
if not worker_info:
return
active_tasks = {task['id'] for worker, tasks in six.iteritems(worker_info) for task in tasks}
# find jobs that are marked as running but not present in celery's active tasks
active_jobs = WooeyJob.objects.filter(status=WooeyJob.RUNNING)
to_disable = set()
for job in active_jobs:
if job.celery_id not in active_tasks:
to_disable.add(job.pk)
WooeyJob.objects.filter(pk__in=to_disable).update(status=WooeyJob.FAILED)
celery_app.conf.beat_schedule.update({
'cleanup-old-jobs': {
'task': 'wooey.tasks.cleanup_wooey_jobs',
'schedule': crontab(hour=0, minute=0), # cleanup at midnight each day
},
'cleanup-dead-jobs': {
'task': 'wooey.tasks.cleanup_dead_jobs',
'schedule': crontab(minute='*/10'), # run every 10 minutes
}
})
| wooey/Wooey | wooey/tasks.py | Python | bsd-3-clause | 10,721 | 0.002332 |
# Copyright (C) 2009-2010 Sergey Koposov
# This file is part of astrolibpy
#
# astrolibpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# astrolibpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with astrolibpy. If not, see <http://www.gnu.org/licenses/>.
import numpy, re
def from_hex(arr, delim=':'):
r=re.compile('\s*(\-?)(.+)%s(.+)%s(.+)'%(delim,delim))
ret=[]
for a in arr:
m = r.search(a)
sign = m.group(1)=='-'
if sign:
sign=-1
else:
sign=1
i1 = int(m.group(2))
i2 = int(m.group(3))
i3 = float(m.group(4))
val = sign*(int(i1)+int(i2)/60.+(float(i3))/3600.)
ret.append(val)
return numpy.array(ret)
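# Quick illustrative usage (input values chosen arbitrarily): "12:34:56.7" maps
# to 12 + 34/60 + 56.7/3600 ~= 12.58, and a leading '-' flips the sign.
if __name__ == '__main__':
	print(from_hex(['12:34:56.7', '-01:02:03.0']))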
| segasai/astrolibpy | my_utils/from_hex.py | Python | gpl-3.0 | 1,118 | 0.027728 |
import os
import sys
from time import time as clock
from rpython.rlib import jit
from rpython.rlib import rrandom
from rpython.jit.codewriter.policy import JitPolicy
# The regex is built up from a combination of individual Regex objects.
# Each is responsible for implementing a specific operator.
class Regex(object):
_immutable_fields_ = ["empty"]
def __init__(self, empty):
self.empty = empty
self.marked = 0
def reset(self):
self.marked = 0
def shift(self, c, mark):
marked = self._shift(c, mark)
self.marked = marked
return marked
class Char(Regex):
_immutable_fields_ = ["c"]
def __init__(self, c):
Regex.__init__(self, 0)
self.c = c
def _shift(self, c, mark):
return mark & (c == self.c)
class Epsilon(Regex):
def __init__(self):
Regex.__init__(self, empty=1)
def _shift(self, c, mark):
return 0
class Binary(Regex):
_immutable_fields_ = ["left", "right"]
def __init__(self, left, right, empty):
Regex.__init__(self, empty)
self.left = left
self.right = right
def reset(self):
self.left.reset()
self.right.reset()
Regex.reset(self)
class Alternative(Binary):
def __init__(self, left, right):
empty = left.empty | right.empty
Binary.__init__(self, left, right, empty)
def _shift(self, c, mark):
marked_left = self.left.shift(c, mark)
marked_right = self.right.shift(c, mark)
return marked_left | marked_right
class Repetition(Regex):
_immutable_fields_ = ["re"]
def __init__(self, re):
Regex.__init__(self, 1)
self.re = re
def _shift(self, c, mark):
return self.re.shift(c, mark | self.marked)
def reset(self):
self.re.reset()
Regex.reset(self)
class Sequence(Binary):
def __init__(self, left, right):
empty = left.empty & right.empty
Binary.__init__(self, left, right, empty)
def _shift(self, c, mark):
old_marked_left = self.left.marked
marked_left = self.left.shift(c, mark)
marked_right = self.right.shift(
c, old_marked_left | (mark & self.left.empty))
return (marked_left & self.right.empty) | marked_right
# The matching loop just shifts each character from the input string
# into the regex object. If it's "marked" by the time we hit the
# end of the string, then it matches.
jitdriver = jit.JitDriver(reds="auto", greens=["re"])
def match(re, s):
if not s:
return re.empty
result = re.shift(s[0], 1)
i = 1
while i < len(s):
jitdriver.jit_merge_point(re=re)
result = re.shift(s[i], 0)
i += 1
re.reset()
return result
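# Small sanity check of the shift-based matcher above (illustrative only, not
# part of the benchmark): (ab)* accepts "abab" but rejects "aba".
def _example_match():
    pattern = Repetition(Sequence(Char("a"), Char("b")))
    return match(pattern, "abab"), match(pattern, "aba")  # -> (1, 0)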
def entry_point(argv):
# Adjust the amount of work we do based on command-line arguments.
# NUM_INPUTS increases the number of loop iterations.
# INPUT_LENGTH increases the amount of work done per loop iteration.
NUM_INPUTS = 1000
INPUT_LENGTH = 50
if len(argv) > 1:
NUM_INPUTS = int(argv[1])
if len(argv) > 2:
INPUT_LENGTH = int(argv[2])
if len(argv) > 3:
raise RuntimeError("too many arguments")
# Build up the regex pattern.
# Target pattern: (a|b)*a(a|b){20}a(a|b)*
# For now we use the same pattern every time, but it must be
# dynamically constructed or it gets eliminated at compile-time.
prefix = Sequence(Repetition(Alternative(Char("a"), Char("b"))), Char("a"))
suffix = Sequence(Char("a"), Repetition(Alternative(Char("a"), Char("b"))))
pattern = prefix
for _ in xrange(20):
pattern = Sequence(pattern, Alternative(Char("a"), Char("b")))
pattern = Sequence(pattern, suffix)
# Generate "random input" to match against the pattern.
# Ideally this would come from the outside world, but stdio
# on pypy.js doesn't seem to work just yet.
print "Generating", NUM_INPUTS, "strings of length", INPUT_LENGTH, "..."
inputs = [None] * NUM_INPUTS
r = rrandom.Random(42)
for i in xrange(len(inputs)):
s = []
for _ in xrange(INPUT_LENGTH):
if r.random() > 0.5:
s.append("a")
else:
s.append("b")
inputs[i] = "".join(s)
# Run each input string through the regex.
# Time how long it takes for the total run.
print "Matching all strings against the regex..."
ts = clock()
for i in xrange(len(inputs)):
# No output, we just want to exercise the loop.
matched = match(pattern, inputs[i])
tdiff = clock() - ts
print "Done!"
print "Matching time for %d strings: %f" % (len(inputs), tdiff)
print "Performed %f matches per second." % (len(inputs) / tdiff,)
return 0
def jitpolicy(driver):
return JitPolicy()
def target(*args):
return entry_point, None
if __name__ == "__main__":
sys.exit(entry_point(sys.argv))
| perkinslr/pypyjs | tools/rematcher.py | Python | mit | 4,972 | 0.000402 |
# -*- coding: utf-8 -*-
from reportlab.lib.colors import Color, CMYKColor, getAllNamedColors, toColor, \
HexColor
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.units import inch, cm
import base64
import httplib
import logging
import mimetypes
import os.path
import re
import reportlab
import shutil
import string
import sys
import tempfile
import types
import urllib
import urllib2
import urlparse
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"):
raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
log = logging.getLogger("xhtml2pdf")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import pyPdf
except:
pyPdf = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
#===============================================================================
# Memoize decorator
#===============================================================================
class memoized(object):
"""
A kwargs-aware memoizer, better than the one in python :)
Don't pass in too large kwargs, since this turns them into a tuple of tuples
Also, avoid mutable types (as usual for memoizers)
    What this does is to create a dictionary of {(*parameters): return value},
and uses it as a cache for subsequent calls to the same method.
It is especially useful for functions that don't rely on external variables
and that are called often. It's a perfect match for our getSize etc...
"""
def __init__(self, func):
self.cache = {}
self.func = func
self.__doc__ = self.func.__doc__ # To avoid great confusion
self.__name__ = self.func.__name__ # This also avoids great confusion
def __call__(self, *args, **kwargs):
# Make sure the following line is not actually slower than what you're
# trying to memoize
args_plus = tuple(kwargs.items())
key = (args, args_plus)
if key not in self.cache:
res = self.func(*args, **kwargs)
self.cache[key] = res
return self.cache[key]
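# Tiny illustration of the memoizer above; _example_scaled_square is a
# hypothetical helper used only for demonstration, not part of xhtml2pdf.
@memoized
def _example_scaled_square(x, scale=1):
    # Computed once per distinct (x, scale) combination, then served from cache.
    return x * x * scale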
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback, sys
type = value = tb = limit = None
type, value, tb = sys.exc_info()
list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
string.join(list[: - 1], ""),
list[ - 1])
def toList(value):
if type(value) not in (types.ListType, types.TupleType):
return [value]
return list(value)
#def _toColor(arg, default=None):
# '''try to map an arbitrary arg to a color instance'''
# if isinstance(arg, Color):
# return arg
# tArg = type(arg)
# if tArg in (types.ListType, types.TupleType):
# assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
# assert 0 <= min(arg) and max(arg) <= 1
# return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
# elif tArg == types.StringType:
# C = getAllNamedColors()
# s = arg.lower()
# if C.has_key(s): return C[s]
# try:
# return toColor(eval(arg))
# except:
# pass
# try:
# return HexColor(arg)
# except:
# if default is None:
# raise ValueError('Invalid color value %r' % arg)
# return default
@memoized
def getColor(value, default=None):
"""
Convert to color value.
This returns a Color object instance from a text bit.
"""
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
return toColor(value, default) # Calling the reportlab function
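# Illustrative only: the three spellings below should all resolve to the same
# red (named color, shorthand hex, and an rgb() functional value).
def _example_get_color():
    return getColor("red"), getColor("#f00"), getColor("rgb(255, 0, 0)")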
def getBorderStyle(value, default=None):
# log.debug(value)
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
#"xx-small" : 3./5.,
#"x-small": 3./4.,
#"small": 8./9.,
#"medium": 1./1.,
#"large": 6./5.,
#"x-large": 3./2.,
#"xx-large": 2./1.,
#"xxx-large": 3./1.,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
@memoized
def getSize(value, relative=0, base=None, default=0.0):
"""
Converts strings to standard sizes.
That is the function taking a string of CSS size ('12pt', '1cm' and so on)
and converts it into a float in a standard unit (in our case, points).
>>> getSize('12pt')
12.0
>>> getSize('1cm')
28.346456692913385
"""
try:
original = value
if value is None:
return relative
elif type(value) is types.FloatType:
return value
elif isinstance(value, int):
return float(value)
elif type(value) in (types.TupleType, types.ListType):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[-2:] == 'cm':
return float(value[:-2].strip()) * cm
elif value[-2:] == 'mm':
return (float(value[:-2].strip()) * mm) # 1mm = 0.1cm
elif value[-2:] == 'in':
return float(value[:-2].strip()) * inch # 1pt == 1/72inch
        elif value[-4:] == 'inch':
return float(value[:-4].strip()) * inch # 1pt == 1/72inch
elif value[-2:] == 'pt':
return float(value[:-2].strip())
elif value[-2:] == 'pc':
return float(value[:-2].strip()) * 12.0 # 1pc == 12pt
elif value[-2:] == 'px':
            return float(value[:-2].strip()) * dpi96 # XXX W3C says to use 96dpi http://www.w3.org/TR/CSS21/syndata.html#length-units
elif value[-1:] == 'i': # 1pt == 1/72inch
return float(value[:-1].strip()) * inch
elif value in ("none", "0", "auto"):
return 0.0
elif relative:
if value[-2:] == 'em': # XXX
return (float(value[:-2].strip()) * relative) # 1em = 1 * fontSize
elif value[-2:] == 'ex': # XXX
return (float(value[:-2].strip()) * (relative / 2.0)) # 1ex = 1/2 fontSize
elif value[-1:] == '%':
# print "%", value, relative, (relative * float(value[:-1].strip())) / 100.0
return (relative * float(value[:-1].strip())) / 100.0 # 1% = (fontSize * 1) / 100
elif value in ("normal", "inherit"):
return relative
elif _relativeSizeTable.has_key(value):
if base:
return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
elif _absoluteSizeTable.has_key(value):
if base:
return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
try:
value = float(value)
except:
log.warn("getSize: Not a float %r", value)
return default #value = 0
return max(0, value)
except Exception:
log.warn("getSize %r %r", original, relative, exc_info=1)
return default
@memoized
def getCoords(x, y, w, h, pagesize):
"""
As a stupid programmer I like to use the upper left
    corner of the document as the 0,0 coords, therefore
we need to do some fancy calculations
"""
#~ print pagesize
ax, ay = pagesize
if x < 0:
x = ax + x
if y < 0:
y = ay + y
if w != None and h != None:
if w <= 0:
w = (ax - x + w)
if h <= 0:
h = (ay - y + h)
return x, (ay - y - h), w, h
return x, (ay - y)
@memoized
def getBox(box, pagesize):
"""
Parse sizes by corners in the form:
<X-Left> <Y-Upper> <Width> <Height>
    The last two values with negative values are interpreted as offsets from
the right and lower border.
"""
box = str(box).split()
if len(box) != 4:
raise Exception, "box not defined right way"
x, y, w, h = [getSize(pos) for pos in box]
return getCoords(x, y, w, h, pagesize)
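# Illustrative only: a frame inset 1cm from every edge of an A4 page given in
# points (595 x 842). The negative width/height count back from the right and
# lower borders, so this returns roughly (28.35, 28.35, 538.31, 785.31).
def _example_get_box():
    return getBox("1cm 1cm -1cm -1cm", (595, 842))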
def getFrameDimensions(data, page_width, page_height):
"""Calculate dimensions of a frame
Returns left, top, width and height of the frame in points.
"""
box = data.get("-pdf-frame-box", [])
if len(box) == 4:
return [getSize(x) for x in box]
top = getSize(data.get("top", 0))
left = getSize(data.get("left", 0))
bottom = getSize(data.get("bottom", 0))
right = getSize(data.get("right", 0))
if "height" in data:
height = getSize(data["height"])
if "top" in data:
top = getSize(data["top"])
bottom = page_height - (top + height)
elif "bottom" in data:
bottom = getSize(data["bottom"])
top = page_height - (bottom + height)
if "width" in data:
width = getSize(data["width"])
if "left" in data:
left = getSize(data["left"])
right = page_width - (left + width)
elif "right" in data:
right = getSize(data["right"])
left = page_width - (right + width)
top += getSize(data.get("margin-top", 0))
left += getSize(data.get("margin-left", 0))
bottom += getSize(data.get("margin-bottom", 0))
right += getSize(data.get("margin-right", 0))
width = page_width - (left + right)
height = page_height - (top + bottom)
return left, top, width, height
@memoized
def getPos(position, pagesize):
"""
Pair of coordinates
"""
position = str(position).split()
if len(position) != 2:
raise Exception, "position not defined right way"
x, y = [getSize(pos) for pos in position]
return getCoords(x, y, None, None, pagesize)
def getBool(s):
" Is it a boolean? "
return str(s).lower() in ("y", "yes", "1", "true")
_uid = 0
def getUID():
" Unique ID "
global _uid
_uid += 1
return str(_uid)
_alignments = {
"left": TA_LEFT,
"center": TA_CENTER,
"middle": TA_CENTER,
"right": TA_RIGHT,
"justify": TA_JUSTIFY,
}
def getAlign(value, default=TA_LEFT):
return _alignments.get(str(value).lower(), default)
#def getVAlign(value):
# # Unused
# return str(value).upper()
GAE = "google.appengine" in sys.modules
if GAE:
STRATEGIES = (
StringIO.StringIO,
StringIO.StringIO)
else:
STRATEGIES = (
StringIO.StringIO,
tempfile.NamedTemporaryFile)
class pisaTempFile(object):
"""A temporary file implementation that uses memory unless
either capacity is breached or fileno is requested, at which
point a real temporary file will be created and the relevant
details returned
If capacity is -1 the second strategy will never be used.
Inspired by:
http://code.activestate.com/recipes/496744/
"""
STRATEGIES = STRATEGIES
CAPACITY = 10 * 1024
def __init__(self, buffer="", capacity=CAPACITY):
"""Creates a TempFile object containing the specified buffer.
If capacity is specified, we use a real temporary file once the
file gets larger than that size. Otherwise, the data is stored
in memory.
"""
#if hasattr(buffer, "read"):
#shutil.copyfileobj( fsrc, fdst[, length])
self.capacity = capacity
self.strategy = int(len(buffer) > self.capacity)
try:
self._delegate = self.STRATEGIES[self.strategy]()
except:
            # Fallback for Google AppEngine etc.
self._delegate = self.STRATEGIES[0]()
self.write(buffer)
# we must set the file's position for preparing to read
self.seek(0)
def makeTempFile(self):
" Switch to next startegy. If an error occured stay with the first strategy "
if self.strategy == 0:
try:
new_delegate = self.STRATEGIES[1]()
new_delegate.write(self.getvalue())
self._delegate = new_delegate
self.strategy = 1
log.warn("Created temporary file %s", self.name)
except:
self.capacity = - 1
def getFileName(self):
" Get a named temporary file "
self.makeTempFile()
return self.name
def fileno(self):
"""Forces this buffer to use a temporary file as the underlying.
object and returns the fileno associated with it.
"""
self.makeTempFile()
return self._delegate.fileno()
def getvalue(self):
" Get value of file. Work around for second strategy "
if self.strategy == 0:
return self._delegate.getvalue()
self._delegate.flush()
self._delegate.seek(0)
return self._delegate.read()
def write(self, value):
" If capacity != -1 and length of file > capacity it is time to switch "
if self.capacity > 0 and self.strategy == 0:
len_value = len(value)
if len_value >= self.capacity:
needs_new_strategy = True
else:
self.seek(0, 2) # find end of file
needs_new_strategy = \
(self.tell() + len_value) >= self.capacity
if needs_new_strategy:
self.makeTempFile()
self._delegate.write(value)
def __getattr__(self, name):
try:
return getattr(self._delegate, name)
except AttributeError:
# hide the delegation
e = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, name)
raise AttributeError(e)
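# Minimal illustration of the strategy switch described in the class docstring
# (the capacity is arbitrary; on platforms without NamedTemporaryFile, e.g.
# Google AppEngine, the buffer simply stays in memory).
def _example_pisa_temp_file():
    buf = pisaTempFile("small payload", capacity=1024)
    in_memory_value = buf.getvalue()  # strategy 0: served from StringIO
    buf.write("x" * 2048)             # exceeds capacity -> real temporary file
    return in_memory_value, buf.strategy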
_rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
"""
XXX
"""
def __init__(self, uri, basepath=None):
self.basepath = basepath
self.mimetype = None
self.file = None
self.data = None
self.uri = None
self.local = None
self.tmp_file = None
uri = str(uri)
log.debug("FileObject %r, Basepath: %r", uri, basepath)
# Data URI
if uri.startswith("data:"):
m = _rx_datauri.match(uri)
self.mimetype = m.group("mime")
self.data = base64.decodestring(m.group("data"))
else:
# Check if we have an external scheme
if basepath and not urlparse.urlparse(uri).scheme:
urlParts = urlparse.urlparse(basepath)
else:
urlParts = urlparse.urlparse(uri)
log.debug("URLParts: %r", urlParts)
if urlParts.scheme == 'file':
if basepath and uri.startswith('/'):
uri = urlparse.urljoin(basepath, uri[1:])
urlResponse = urllib2.urlopen(uri)
self.mimetype = urlResponse.info().get("Content-Type", '').split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
# Drive letters have len==1 but we are looking for things like http:
elif urlParts.scheme in ('http', 'https'):
# External data
if basepath:
uri = urlparse.urljoin(basepath, uri)
#path = urlparse.urlsplit(url)[2]
#mimetype = getMimeType(path)
# Using HTTPLIB
server, path = urllib.splithost(uri[uri.find("//"):])
if uri.startswith("https://"):
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.request("GET", path)
r1 = conn.getresponse()
# log.debug("HTTP %r %r %r %r", server, path, uri, r1)
if (r1.status, r1.reason) == (200, "OK"):
self.mimetype = r1.getheader("Content-Type", '').split(";")[0]
self.uri = uri
if r1.getheader("content-encoding") == "gzip":
import gzip
self.file = gzip.GzipFile(mode="rb", fileobj=r1)
else:
self.file = r1
else:
urlResponse = urllib2.urlopen(uri)
self.mimetype = urlResponse.info().get("Content-Type", '').split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
else:
# Local data
if basepath:
uri = os.path.normpath(os.path.join(basepath, uri))
if os.path.isfile(uri):
self.uri = uri
self.local = uri
self.setMimeTypeByName(uri)
self.file = open(uri, "rb")
def getFile(self):
if self.file is not None:
return self.file
if self.data is not None:
return pisaTempFile(self.data)
return None
def getNamedFile(self):
if self.notFound():
return None
if self.local:
return str(self.local)
if not self.tmp_file:
self.tmp_file = tempfile.NamedTemporaryFile()
if self.file:
shutil.copyfileobj(self.file, self.tmp_file)
else:
self.tmp_file.write(self.getData())
self.tmp_file.flush()
return self.tmp_file.name
def getData(self):
if self.data is not None:
return self.data
if self.file is not None:
self.data = self.file.read()
return self.data
return None
def notFound(self):
return (self.file is None) and (self.data is None)
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetypes.guess_type(name)[0].split(";")[0]
def getFile(*a , **kw):
file = pisaFileObject(*a, **kw)
if file.notFound():
return None
return file
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
'gainsboro': Color(.862745, .862745, .862745),
'ghostwhite': Color(.972549, .972549, 1),
'gold': Color(1, .843137, 0),
'goldenrod': Color(.854902, .647059, .12549),
'gray': Color(.501961, .501961, .501961),
'graytext': Color(128, 128, 128),
'green': Color(0, .501961, 0),
'greenyellow': Color(.678431, 1, .184314),
'grey': Color(.501961, .501961, .501961),
'highlight': Color(10, 36, 106),
'highlighttext': Color(255, 255, 255),
'honeydew': Color(.941176, 1, .941176),
'hotpink': Color(1, .411765, .705882),
'inactiveborder': Color(212, 208, 200),
'inactivecaption': Color(128, 128, 128),
'inactivecaptiontext': Color(212, 208, 200),
'indianred': Color(.803922, .360784, .360784),
'indigo': Color(.294118, 0, .509804),
'infobackground': Color(255, 255, 225),
'infotext': Color(0, 0, 0),
'ivory': Color(1, 1, .941176),
'khaki': Color(.941176, .901961, .54902),
'lavender': Color(.901961, .901961, .980392),
'lavenderblush': Color(1, .941176, .960784),
'lawngreen': Color(.486275, .988235, 0),
'lemonchiffon': Color(1, .980392, .803922),
'lightblue': Color(.678431, .847059, .901961),
'lightcoral': Color(.941176, .501961, .501961),
'lightcyan': Color(.878431, 1, 1),
'lightgoldenrodyellow': Color(.980392, .980392, .823529),
'lightgray': Color(.827451, .827451, .827451),
'lightgreen': Color(.564706, .933333, .564706),
'lightgrey': Color(.827451, .827451, .827451),
'lightpink': Color(1, .713725, .756863),
'lightsalmon': Color(1, .627451, .478431),
'lightseagreen': Color(.12549, .698039, .666667),
'lightskyblue': Color(.529412, .807843, .980392),
'lightslategray': Color(.466667, .533333, .6),
'lightslategrey': Color(.466667, .533333, .6),
'lightsteelblue': Color(.690196, .768627, .870588),
'lightyellow': Color(1, 1, .878431),
'lime': Color(0, 1, 0),
'limegreen': Color(.196078, .803922, .196078),
'linen': Color(.980392, .941176, .901961),
'magenta': Color(1, 0, 1),
'maroon': Color(.501961, 0, 0),
'mediumaquamarine': Color(.4, .803922, .666667),
'mediumblue': Color(0, 0, .803922),
'mediumorchid': Color(.729412, .333333, .827451),
'mediumpurple': Color(.576471, .439216, .858824),
'mediumseagreen': Color(.235294, .701961, .443137),
'mediumslateblue': Color(.482353, .407843, .933333),
'mediumspringgreen': Color(0, .980392, .603922),
'mediumturquoise': Color(.282353, .819608, .8),
'mediumvioletred': Color(.780392, .082353, .521569),
'menu': Color(212, 208, 200),
'menutext': Color(0, 0, 0),
'midnightblue': Color(.098039, .098039, .439216),
'mintcream': Color(.960784, 1, .980392),
'mistyrose': Color(1, .894118, .882353),
'moccasin': Color(1, .894118, .709804),
'navajowhite': Color(1, .870588, .678431),
'navy': Color(0, 0, .501961),
'oldlace': Color(.992157, .960784, .901961),
'olive': Color(.501961, .501961, 0),
'olivedrab': Color(.419608, .556863, .137255),
'orange': Color(1, .647059, 0),
'orangered': Color(1, .270588, 0),
'orchid': Color(.854902, .439216, .839216),
'palegoldenrod': Color(.933333, .909804, .666667),
'palegreen': Color(.596078, .984314, .596078),
'paleturquoise': Color(.686275, .933333, .933333),
'palevioletred': Color(.858824, .439216, .576471),
'papayawhip': Color(1, .937255, .835294),
'peachpuff': Color(1, .854902, .72549),
'peru': Color(.803922, .521569, .247059),
'pink': Color(1, .752941, .796078),
'plum': Color(.866667, .627451, .866667),
'powderblue': Color(.690196, .878431, .901961),
'purple': Color(.501961, 0, .501961),
'red': Color(1, 0, 0),
'rosybrown': Color(.737255, .560784, .560784),
'royalblue': Color(.254902, .411765, .882353),
'saddlebrown': Color(.545098, .270588, .07451),
'salmon': Color(.980392, .501961, .447059),
'sandybrown': Color(.956863, .643137, .376471),
'scrollbar': Color(212, 208, 200),
'seagreen': Color(.180392, .545098, .341176),
'seashell': Color(1, .960784, .933333),
'sienna': Color(.627451, .321569, .176471),
'silver': Color(.752941, .752941, .752941),
'skyblue': Color(.529412, .807843, .921569),
'slateblue': Color(.415686, .352941, .803922),
'slategray': Color(.439216, .501961, .564706),
'slategrey': Color(.439216, .501961, .564706),
'snow': Color(1, .980392, .980392),
'springgreen': Color(0, 1, .498039),
'steelblue': Color(.27451, .509804, .705882),
'tan': Color(.823529, .705882, .54902),
'teal': Color(0, .501961, .501961),
'thistle': Color(.847059, .74902, .847059),
'threeddarkshadow': Color(64, 64, 64),
'threedface': Color(212, 208, 200),
'threedhighlight': Color(255, 255, 255),
'threedlightshadow': Color(212, 208, 200),
'threedshadow': Color(128, 128, 128),
'tomato': Color(1, .388235, .278431),
'turquoise': Color(.25098, .878431, .815686),
'violet': Color(.933333, .509804, .933333),
'wheat': Color(.960784, .870588, .701961),
'white': Color(1, 1, 1),
'whitesmoke': Color(.960784, .960784, .960784),
'window': Color(255, 255, 255),
'windowframe': Color(0, 0, 0),
'windowtext': Color(0, 0, 0),
'yellow': Color(1, 1, 0),
'yellowgreen': Color(.603922, .803922, .196078)}
| tecknicaltom/xhtml2pdf | xhtml2pdf/util.py | Python | apache-2.0 | 27,717 | 0.003355 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class sale_order(orm.Model):
_inherit = "sale.order"
_columns = {
'cig': fields.char('CIG', size=15, help="Codice identificativo di gara"),
'cup': fields.char('CUP', size=15, help="Codice unico di Progetto")
}
#-----------------------------------------------------------------------------
    # AVOID COPYING THE 'cig/cup' NUMBERS
#-----------------------------------------------------------------------------
def copy(self, cr, uid, id, default={}, context=None):
default = default or {}
default.update({
'cig': '',
'cup': '',
})
if 'cig' not in default:
default.update({
'cig': False
})
if 'cup' not in default:
default.update({
'cup': False
})
return super(sale_order, self).copy(cr, uid, id, default, context)
def _prepare_invoice(self, cr, uid, order, lines, context):
invoice_vals = super(sale_order, self)._prepare_invoice(cr, uid, order, lines, context)
invoice_vals.update({
'cig': order.cig,
'cup': order.cup,
})
return invoice_vals
def _prepare_order_picking(self, cr, uid, order, context=None):
picking_vals = super(sale_order, self)._prepare_order_picking(cr, uid, order, context)
picking_vals.update({
'cig': order.cig,
'cup': order.cup,
})
return picking_vals
    # It is better to use a hook function; this way we hope to speed things up
def _inv_get(self, cr, uid, order, context=None):
return {
'carriage_condition_id': order.carriage_condition_id.id,
'goods_description_id': order.goods_description_id.id,
'cig': order.cig or '',
'cup': order.cup or ''
}
| iw3hxn/LibrERP | l10n_it_sale/models/inherit_sale_order.py | Python | agpl-3.0 | 2,854 | 0.002803 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.Timeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from tensorflow.python.client import timeline
class TimelineTest(tf.test.TestCase):
def _validateTrace(self, chrome_trace_format):
# Check that the supplied string is valid JSON.
trace = json.loads(chrome_trace_format)
# It should have a top-level key containing events.
self.assertTrue('traceEvents' in trace)
# Every event in the list should have a 'ph' field.
for event in trace['traceEvents']:
self.assertTrue('ph' in event)
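  def _example_dump_trace(self, ctf):
    # Illustrative helper (not used by these tests): write a generated trace to
    # disk so it can be inspected manually in chrome://tracing. The output path
    # is an arbitrary choice for this example.
    with open('/tmp/timeline_trace.json', 'w') as trace_file:
      trace_file.write(ctf)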
def testSimpleTimeline(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.device('/cpu:0'):
with tf.Session() as sess:
sess.run(
tf.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
def testTimelineCpu(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with self.test_session(use_gpu=False) as sess:
const1 = tf.constant(1.0, name='const1')
const2 = tf.constant(2.0, name='const2')
result = tf.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
def testTimelineGpu(self):
if not tf.test.is_gpu_available(cuda_only=True):
return
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with self.test_session(force_gpu=True) as sess:
const1 = tf.constant(1.0, name='const1')
const2 = tf.constant(2.0, name='const2')
result = tf.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/gpu:0' in devices)
self.assertTrue('/gpu:0/stream:all' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
def testAnalysisAndAllocations(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
config = tf.ConfigProto(device_count={'CPU': 3})
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
const1 = tf.constant(1.0, name='const1')
with tf.device('/cpu:1'):
const2 = tf.constant(2.0, name='const2')
with tf.device('/cpu:2'):
result = const1 + const2 + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
step_analysis = tl.analyze_step_stats()
ctf = step_analysis.chrome_trace.format_to_string()
self._validateTrace(ctf)
maximums = step_analysis.allocator_maximums
self.assertTrue('cpu' in maximums)
cpu_max = maximums['cpu']
# At least const1 + const2, both float32s (4 bytes each)
self.assertGreater(cpu_max.num_bytes, 8)
self.assertGreater(cpu_max.timestamp, 0)
self.assertTrue('const1' in cpu_max.tensors)
self.assertTrue('const2' in cpu_max.tensors)
def testManyCPUs(self):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
config = tf.ConfigProto(device_count={'CPU': 3})
with tf.Session(config=config) as sess:
with tf.device('/cpu:0'):
const1 = tf.constant(1.0, name='const1')
with tf.device('/cpu:1'):
const2 = tf.constant(2.0, name='const2')
with tf.device('/cpu:2'):
result = const1 + const2 + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/cpu:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/cpu:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/cpu:2' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False,
show_dataflow=False)
self._validateTrace(ctf)
if __name__ == '__main__':
tf.test.main()
| sandeepdsouza93/TensorFlow-15712 | tensorflow/python/client/timeline_test.py | Python | apache-2.0 | 7,065 | 0.00368 |
import unittest
import os
import math
from rdbtools import RdbCallback, RdbParser
class RedisParserTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_rdb(self):
r = load_rdb('empty_database.rdb')
self.assert_('start_rdb' in r.methods_called)
self.assert_('end_rdb' in r.methods_called)
self.assertEquals(len(r.databases), 0, msg = "didn't expect any databases")
def test_multiple_databases(self):
r = load_rdb('multiple_databases.rdb')
        self.assertEquals(len(r.databases), 2)
self.assert_(1 not in r.databases)
self.assertEquals(r.databases[0]["key_in_zeroth_database"], "zero")
self.assertEquals(r.databases[2]["key_in_second_database"], "second")
def test_keys_with_expiry(self):
r = load_rdb('keys_with_expiry.rdb')
expiry = r.expiry[0]['expires_ms_precision']
self.assertEquals(expiry.year, 2022)
self.assertEquals(expiry.month, 12)
self.assertEquals(expiry.day, 25)
self.assertEquals(expiry.hour, 10)
self.assertEquals(expiry.minute, 11)
self.assertEquals(expiry.second, 12)
self.assertEquals(expiry.microsecond, 573000)
def test_integer_keys(self):
r = load_rdb('integer_keys.rdb')
self.assertEquals(r.databases[0][125], "Positive 8 bit integer")
self.assertEquals(r.databases[0][0xABAB], "Positive 16 bit integer")
self.assertEquals(r.databases[0][0x0AEDD325], "Positive 32 bit integer")
def test_negative_integer_keys(self):
r = load_rdb('integer_keys.rdb')
self.assertEquals(r.databases[0][-123], "Negative 8 bit integer")
self.assertEquals(r.databases[0][-0x7325], "Negative 16 bit integer")
self.assertEquals(r.databases[0][-0x0AEDD325], "Negative 32 bit integer")
def test_string_key_with_compression(self):
r = load_rdb('easily_compressible_string_key.rdb')
key = "".join('a' for x in range(0, 200))
value = "Key that redis should compress easily"
self.assertEquals(r.databases[0][key], value)
def test_zipmap_thats_compresses_easily(self):
r = load_rdb('zipmap_that_compresses_easily.rdb')
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")
def test_zipmap_that_doesnt_compress(self):
r = load_rdb('zipmap_that_doesnt_compress.rdb')
self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["MKD1G6"], 2)
self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["YNNXK"], "F7TI")
def test_zipmap_with_big_values(self):
''' See issue https://github.com/sripathikrishnan/redis-rdb-tools/issues/2
        Values with length around 253/254/255 bytes are treated specially in the parser.
        This test exercises those boundary conditions.
In order to test a bug with large ziplists, it is necessary to start
Redis with "hash-max-ziplist-value 21000", create this rdb file,
and run the test. That forces the 20kbyte value to be stored as a
ziplist with a length encoding of 5 bytes.
'''
r = load_rdb('zipmap_with_big_values.rdb')
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["253bytes"]), 253)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["254bytes"]), 254)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["255bytes"]), 255)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["300bytes"]), 300)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["20kbytes"]), 20000)
def test_hash_as_ziplist(self):
'''In redis dump version = 4, hashmaps are stored as ziplists'''
r = load_rdb('hash_as_ziplist.rdb')
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")
def test_dictionary(self):
r = load_rdb('dictionary.rdb')
self.assertEquals(r.lengths[0]["force_dictionary"], 1000)
self.assertEquals(r.databases[0]["force_dictionary"]["ZMU5WEJDG7KU89AOG5LJT6K7HMNB3DEI43M6EYTJ83VRJ6XNXQ"],
"T63SOS8DQJF0Q0VJEZ0D1IQFCYTIPSBOUIAI9SB0OV57MQR1FI")
self.assertEquals(r.databases[0]["force_dictionary"]["UHS5ESW4HLK8XOGTM39IK1SJEUGVV9WOPK6JYA5QBZSJU84491"],
"6VULTCV52FXJ8MGVSFTZVAGK2JXZMGQ5F8OVJI0X6GEDDR27RZ")
def test_ziplist_that_compresses_easily(self):
r = load_rdb('ziplist_that_compresses_easily.rdb')
self.assertEquals(r.lengths[0]["ziplist_compresses_easily"], 6)
for idx, length in enumerate([6, 12, 18, 24, 30, 36]) :
self.assertEquals(("".join("a" for x in xrange(length))), r.databases[0]["ziplist_compresses_easily"][idx])
def test_ziplist_that_doesnt_compress(self):
r = load_rdb('ziplist_that_doesnt_compress.rdb')
self.assertEquals(r.lengths[0]["ziplist_doesnt_compress"], 2)
self.assert_("aj2410" in r.databases[0]["ziplist_doesnt_compress"])
self.assert_("cc953a17a8e096e76a44169ad3f9ac87c5f8248a403274416179aa9fbd852344"
in r.databases[0]["ziplist_doesnt_compress"])
def test_ziplist_with_integers(self):
r = load_rdb('ziplist_with_integers.rdb')
expected_numbers = []
for x in range(0,13):
expected_numbers.append(x)
expected_numbers += [-2, 13, 25, -61, 63, 16380, -16000, 65535, -65523, 4194304, 0x7fffffffffffffff]
self.assertEquals(r.lengths[0]["ziplist_with_integers"], len(expected_numbers))
for num in expected_numbers :
self.assert_(num in r.databases[0]["ziplist_with_integers"], "Cannot find %d" % num)
def test_linkedlist(self):
r = load_rdb('linkedlist.rdb')
self.assertEquals(r.lengths[0]["force_linkedlist"], 1000)
self.assert_("JYY4GIFI0ETHKP4VAJF5333082J4R1UPNPLE329YT0EYPGHSJQ" in r.databases[0]["force_linkedlist"])
self.assert_("TKBXHJOX9Q99ICF4V78XTCA2Y1UYW6ERL35JCIL1O0KSGXS58S" in r.databases[0]["force_linkedlist"])
def test_intset_16(self):
r = load_rdb('intset_16.rdb')
self.assertEquals(r.lengths[0]["intset_16"], 3)
for num in (0x7ffe, 0x7ffd, 0x7ffc) :
self.assert_(num in r.databases[0]["intset_16"])
def test_intset_32(self):
r = load_rdb('intset_32.rdb')
self.assertEquals(r.lengths[0]["intset_32"], 3)
for num in (0x7ffefffe, 0x7ffefffd, 0x7ffefffc) :
self.assert_(num in r.databases[0]["intset_32"])
def test_intset_64(self):
r = load_rdb('intset_64.rdb')
self.assertEquals(r.lengths[0]["intset_64"], 3)
for num in (0x7ffefffefffefffe, 0x7ffefffefffefffd, 0x7ffefffefffefffc) :
self.assert_(num in r.databases[0]["intset_64"])
def test_regular_set(self):
r = load_rdb('regular_set.rdb')
self.assertEquals(r.lengths[0]["regular_set"], 6)
for member in ("alpha", "beta", "gamma", "delta", "phi", "kappa") :
self.assert_(member in r.databases[0]["regular_set"], msg=('%s missing' % member))
def test_sorted_set_as_ziplist(self):
r = load_rdb('sorted_set_as_ziplist.rdb')
self.assertEquals(r.lengths[0]["sorted_set_as_ziplist"], 3)
zset = r.databases[0]["sorted_set_as_ziplist"]
self.assert_(floateq(zset['8b6ba6718a786daefa69438148361901'], 1))
self.assert_(floateq(zset['cb7a24bb7528f934b841b34c3a73e0c7'], 2.37))
self.assert_(floateq(zset['523af537946b79c4f8369ed39ba78605'], 3.423))
def test_filtering_by_keys(self):
r = load_rdb('parser_filters.rdb', filters={"keys":"k[0-9]"})
self.assertEquals(r.databases[0]['k1'], "ssssssss")
self.assertEquals(r.databases[0]['k3'], "wwwwwwww")
self.assertEquals(len(r.databases[0]), 2)
def test_filtering_by_type(self):
r = load_rdb('parser_filters.rdb', filters={"types":["sortedset"]})
self.assert_('z1' in r.databases[0])
self.assert_('z2' in r.databases[0])
self.assert_('z3' in r.databases[0])
self.assert_('z4' in r.databases[0])
self.assertEquals(len(r.databases[0]), 4)
def test_filtering_by_database(self):
r = load_rdb('multiple_databases.rdb', filters={"dbs":[2]})
self.assert_('key_in_zeroth_database' not in r.databases[0])
self.assert_('key_in_second_database' in r.databases[2])
self.assertEquals(len(r.databases[0]), 0)
self.assertEquals(len(r.databases[2]), 1)
def test_rdb_version_5_with_checksum(self):
r = load_rdb('rdb_version_5_with_checksum.rdb')
self.assertEquals(r.databases[0]['abcd'], 'efgh')
self.assertEquals(r.databases[0]['foo'], 'bar')
self.assertEquals(r.databases[0]['bar'], 'baz')
self.assertEquals(r.databases[0]['abcdef'], 'abcdef')
self.assertEquals(r.databases[0]['longerstring'], 'thisisalongerstring.idontknowwhatitmeans')
def floateq(f1, f2) :
return math.fabs(f1 - f2) < 0.00001
def load_rdb(file_name, filters=None) :
r = MockRedis()
parser = RdbParser(r, filters)
parser.parse(os.path.join(os.path.dirname(__file__), 'dumps', file_name))
return r
class MockRedis(RdbCallback):
def __init__(self) :
self.databases = {}
self.lengths = {}
self.expiry = {}
self.methods_called = []
self.dbnum = 0
def currentdb(self) :
return self.databases[self.dbnum]
def store_expiry(self, key, expiry) :
self.expiry[self.dbnum][key] = expiry
def store_length(self, key, length) :
if not self.dbnum in self.lengths :
self.lengths[self.dbnum] = {}
self.lengths[self.dbnum][key] = length
def get_length(self, key) :
if not key in self.lengths[self.dbnum] :
raise Exception('Key %s does not have a length' % key)
return self.lengths[self.dbnum][key]
def start_rdb(self):
self.methods_called.append('start_rdb')
def start_database(self, dbnum):
self.dbnum = dbnum
self.databases[dbnum] = {}
self.expiry[dbnum] = {}
self.lengths[dbnum] = {}
def set(self, key, value, expiry, info):
self.currentdb()[key] = value
if expiry :
self.store_expiry(key, expiry)
def start_hash(self, key, length, expiry, info):
if key in self.currentdb() :
raise Exception('start_hash called with key %s that already exists' % key)
else :
self.currentdb()[key] = {}
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, length)
def hset(self, key, field, value):
if not key in self.currentdb() :
raise Exception('start_hash not called for key = %s', key)
self.currentdb()[key][field] = value
def end_hash(self, key):
if not key in self.currentdb() :
raise Exception('start_hash not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on hash %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def start_set(self, key, cardinality, expiry, info):
if key in self.currentdb() :
raise Exception('start_set called with key %s that already exists' % key)
else :
self.currentdb()[key] = []
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, cardinality)
def sadd(self, key, member):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
self.currentdb()[key].append(member)
def end_set(self, key):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on set %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def start_list(self, key, length, expiry, info):
if key in self.currentdb() :
raise Exception('start_list called with key %s that already exists' % key)
else :
self.currentdb()[key] = []
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, length)
def rpush(self, key, value) :
if not key in self.currentdb() :
raise Exception('start_list not called for key = %s', key)
self.currentdb()[key].append(value)
def end_list(self, key):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on list %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def start_sorted_set(self, key, length, expiry, info):
if key in self.currentdb() :
raise Exception('start_sorted_set called with key %s that already exists' % key)
else :
self.currentdb()[key] = {}
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, length)
def zadd(self, key, score, member):
if not key in self.currentdb() :
raise Exception('start_sorted_set not called for key = %s', key)
self.currentdb()[key][member] = score
def end_sorted_set(self, key):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on sortedset %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def end_database(self, dbnum):
if self.dbnum != dbnum :
raise Exception('start_database called with %d, but end_database called %d instead' % (self.dbnum, dbnum))
def end_rdb(self):
self.methods_called.append('end_rdb')
| idning/redis-rdb-tools | tests/parser_tests.py | Python | mit | 14,949 | 0.009967 |
import os
from buildtest.defaults import BUILDTEST_ROOT
from buildtest.menu.build import discover_buildspecs
included_bp, excluded_bp = discover_buildspecs(
buildspec=[os.path.join(BUILDTEST_ROOT, "tutorials")]
)
print(f"discovered_buildspec: {included_bp} excluded buildspec: {excluded_bp}")
| HPC-buildtest/buildtest-framework | docs/scripting_examples/ex1.py | Python | mit | 300 | 0.003333 |
from db_Dao import init_db
init_db()
| fs714/drcontroller | drcontroller/db/init_db.py | Python | apache-2.0 | 38 | 0 |
# -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;upnp
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasUPnPVersion)#
#echo(__FILEPATH__)#
"""
from dNG.data.text.link import Link
from dNG.data.upnp.client_settings_mixin import ClientSettingsMixin
from dNG.data.upnp.service import Service
from dNG.data.upnp.upnp_exception import UpnpException
from dNG.data.upnp.variable import Variable
from dNG.plugins.hook import Hook
from dNG.runtime.not_implemented_exception import NotImplementedException
from dNG.runtime.type_exception import TypeException
class AbstractService(Service, ClientSettingsMixin):
"""
An extended, abstract service implementation for server services.
:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: pas
:subpackage: upnp
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
"""
def __init__(self):
"""
Constructor __init__(AbstractService)
:since: v0.2.00
"""
Service.__init__(self)
ClientSettingsMixin.__init__(self)
self.configid = None
"""
UPnP configId value
"""
self.host_service = False
"""
UPnP service is managed by host
"""
self.type = None
"""
UPnP service type
"""
self.udn = None
"""
UPnP UDN value
"""
self.upnp_domain = None
"""
UPnP service specification domain
"""
self.version = None
"""
UPnP service type version
"""
#
def add_host_action(self, action, argument_variables = None, return_variable = None, result_variables = None):
"""
Adds the given host service action.
:param action: SOAP action
:param argument_variables: Argument variables definition
:param return_variable: Return variable definition
:param result_variables: Result variables definition
:since: v0.2.00
"""
if (action not in self.actions):
if (argument_variables is None): argument_variables = [ ]
elif (type(argument_variables) is not list): raise TypeException("Given argument variables definition is invalid")
if (return_variable is None): return_variable = { }
elif (type(return_variable) is not dict): raise TypeException("Given return variables definition is invalid")
if (result_variables is None): result_variables = [ ]
elif (type(result_variables) is not list): raise TypeException("Given result variables definition is invalid")
self.actions[action] = { "argument_variables": argument_variables,
"return_variable": return_variable,
"result_variables": result_variables
}
#
#
def add_host_variable(self, name, definition):
"""
Adds the given host service variable.
:param name: Variable name
:param definition: Variable definition
:since: v0.2.00
"""
if (name not in self.variables):
if (type(definition) is not dict): raise TypeException("Given variable definition is invalid")
self.variables[name] = definition
#
#
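    def _example_register_host_action(self):
        """
Illustrative sketch only: the "Browse" action and variable names below are
hypothetical and merely show how a subclass is expected to combine
add_host_variable() and add_host_action() when defining a hosted service.
        """
        self.add_host_variable("A_ARG_TYPE_ObjectID",
                               { "is_sending_events": False,
                                 "is_multicasting_events": False,
                                 "type": "string"
                               }
                              )
        self.add_host_action("Browse",
                             argument_variables = [ { "name": "ObjectID", "variable": "A_ARG_TYPE_ObjectID" } ],
                             return_variable = { "name": "Result", "variable": "A_ARG_TYPE_ObjectID" }
                            )
    #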
def get_name(self):
"""
Returns the UPnP service name (URN without version).
:return: (str) Service name
:since: v0.2.00
"""
return ("{0}:service:{1}".format(self.upnp_domain, self.type) if (self.host_service) else Service.get_name(self))
#
def get_service_id(self):
"""
Returns the UPnP serviceId value.
:return: (str) UPnP serviceId value
:since: v0.2.00
"""
return (self.service_id if (self.host_service) else Service.get_service_id(self))
#
def get_service_id_urn(self):
"""
Returns the UPnP serviceId value.
:return: (str) UPnP serviceId URN
:since: v0.2.00
"""
return ("{0}:serviceId:{1}".format(self.upnp_domain, self.service_id) if (self.host_service) else Service.get_service_id_urn(self))
#
def get_type(self):
"""
Returns the UPnP service type.
:return: (str) Service type
:since: v0.2.00
"""
return (self.type if (self.host_service) else Service.get_type(self))
#
def get_udn(self):
"""
Returns the UPnP UDN value.
:return: (str) UPnP device UDN
:since: v0.2.00
"""
return (self.udn if (self.host_service) else Service.get_udn(self))
#
def get_upnp_domain(self):
"""
Returns the UPnP service specification domain.
:return: (str) UPnP service specification domain
:since: v0.2.00
"""
return (self.upnp_domain if (self.host_service) else Service.get_upnp_domain(self))
#
def get_urn(self):
"""
Returns the UPnP serviceType value.
:return: (str) UPnP URN
:since: v0.2.00
"""
return ("{0}:service:{1}:{2}".format(self.get_upnp_domain(), self.get_type(), self.get_version())
if (self.host_service) else
Service.get_urn(self)
)
#
def get_version(self):
"""
Returns the UPnP service type version.
:return: (str) Service type version
:since: v0.2.00
"""
return (self.version if (self.host_service) else Service.get_version(self))
#
def get_xml(self):
"""
Returns the UPnP SCPD.
:return: (str) UPnP SCPD XML
:since: v0.2.00
"""
xml_resource = self._get_xml(self._init_xml_resource())
return xml_resource.export_cache(True)
#
def _get_xml(self, xml_resource):
"""
Returns the UPnP SCPD.
:param xml_resource: XML resource
:return: (object) UPnP SCPD XML resource
:since: v0.2.00
"""
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}._get_xml()- (#echo(__LINE__)#)", self, context = "pas_upnp")
client_settings = self.get_client_settings()
if (not client_settings.get("upnp_xml_cdata_encoded", False)): xml_resource.set_cdata_encoding(False)
attributes = { "xmlns": "urn:schemas-upnp-org:service-1-0" }
if (self.configid is not None): attributes['configId'] = self.configid
xml_resource.add_node("scpd", attributes = attributes)
xml_resource.set_cached_node("scpd")
spec_version = (self.get_spec_version()
if (client_settings.get("upnp_spec_versioning_supported", True)) else
( 1, 0 )
)
xml_resource.add_node("scpd specVersion major", spec_version[0])
xml_resource.add_node("scpd specVersion minor", spec_version[1])
if (len(self.actions) > 0):
position = 0
for action_name in self.actions:
xml_base_path = "scpd actionList action#{0:d}".format(position)
xml_resource.add_node(xml_base_path)
xml_resource.set_cached_node(xml_base_path)
action = self.actions[action_name]
xml_resource.add_node("{0} name".format(xml_base_path), action_name)
variables = [ ]
for variable in action['argument_variables']:
variable = variable.copy()
variable['direction'] = "in"
variables.append(variable)
#
if (action['return_variable'] is not None):
variable = action['return_variable'].copy()
variable['direction'] = "out"
variable['retval'] = True
variables.append(variable)
#
for variable in action['result_variables']:
variable = variable.copy()
variable['direction'] = "out"
variables.append(variable)
#
variables_count = len(variables)
for position_variable in range(0, variables_count):
xml_resource.add_node("{0} argumentList argument#{1:d}".format(xml_base_path, position_variable))
xml_resource.add_node("{0} argumentList argument#{1:d} name".format(xml_base_path, position_variable), variables[position_variable]['name'])
xml_resource.add_node("{0} argumentList argument#{1:d} direction".format(xml_base_path, position_variable), variables[position_variable]['direction'])
if ("retval" in variables[position_variable]): xml_resource.add_node("{0} argumentList argument#{1:d} retval".format(xml_base_path, position_variable))
xml_resource.add_node("{0} argumentList argument#{1:d} relatedStateVariable".format(xml_base_path, position_variable), variables[position_variable]['variable'])
#
position += 1
#
position_variable = 0
xml_resource.add_node("scpd serviceStateTable".format(xml_base_path))
xml_resource.set_cached_node("scpd serviceStateTable".format(xml_base_path))
for variable_name in self.variables:
variable = self.variables[variable_name]
xml_base_path = "scpd serviceStateTable stateVariable#{0:d}".format(position_variable)
attributes = { }
if (not variable['is_sending_events']): attributes['sendEvents'] = "no"
if (variable['is_multicasting_events']): attributes['multicast'] = "yes"
xml_resource.add_node(xml_base_path, attributes = attributes)
xml_resource.add_node("{0} name".format(xml_base_path), variable_name)
xml_resource.add_node("{0} dataType".format(xml_base_path), variable['type'])
if ("value" in variable): xml_resource.add_node("{0} defaultValue".format(xml_base_path), variable['value'])
values_allowed_count = (len(variable['values_allowed']) if ("values_allowed" in variable) else 0)
for position_values_allowed in range(0, values_allowed_count): xml_resource.add_node("{0} allowedValueList allowedValue#{1:d}".format(xml_base_path, position_values_allowed), variable['values_allowed'][position_values_allowed])
if ("values_min" in variable): xml_resource.add_node("{0} allowedValueRange minimum".format(xml_base_path), variable['values_min'])
if ("values_max" in variable): xml_resource.add_node("{0} allowedValueRange maximum".format(xml_base_path), variable['values_max'])
if ("values_stepping" in variable): xml_resource.add_node("{0} allowedValueRange step".format(xml_base_path), variable['values_stepping'])
position_variable += 1
#
#
return xml_resource
#
def _handle_gena_registration(self, sid):
"""
Handles the registration of an UPnP device at GENA with the given SID.
:param sid: UPnP SID
:since: v0.2.00
"""
pass
#
def handle_soap_call(self, action, arguments_given = None):
"""
Executes the given SOAP action.
:param action: SOAP action called
:param arguments_given: SOAP arguments
:return: (list) Result argument list
:since: v0.2.00
"""
# pylint: disable=broad-except,star-args
if (self.log_handler is not None): self.log_handler.debug("#echo(__FILEPATH__)# -{0!r}.handle_soap_call({1})- (#echo(__LINE__)#)", self, action, context = "pas_upnp")
_return = UpnpException("pas_http_core_500")
action_definition = None
action_method = AbstractService.RE_CAMEL_CASE_SPLITTER.sub("\\1_\\2", action).lower()
arguments = { }
if (arguments_given is None): arguments_given = [ ]
is_request_valid = False
if (action == "QueryStateVariable"):
action_definition = { "argument_variables": [ { "name": "varName", "variable": "A_ARG_TYPE_VarName" } ],
"return_variable": { "name": "return", "variable": "A_ARG_TYPE_VarValue" },
"result_variables": [ ]
}
variables = { "A_ARG_TYPE_VarName": { "is_sending_events": False,
"is_multicasting_events": False,
"type": "string"
},
"A_ARG_TYPE_VarValue": { "is_sending_events": False,
"is_multicasting_events": False,
"type": "string"
}
}
if (len(arguments_given) == 1): arguments_given = { "varName": arguments_given.popitem()[1] }
elif (action in self.actions):
action_definition = self.actions[action]
variables = self.variables
#
if (action_definition is not None):
is_request_valid = True
for argument in action_definition['argument_variables']:
if (argument['variable'] not in variables):
is_request_valid = False
_return = UpnpException("pas_http_core_500")
break
elif (argument['name'] in arguments_given): argument_given = arguments_given[argument['name']]
elif ("value" in variables[argument['variable']]): argument_given = variables[argument['variable']]['value']
else:
is_request_valid = False
_return = UpnpException("pas_http_core_400", 402)
break
#
if (is_request_valid):
argument_name = AbstractService.RE_CAMEL_CASE_SPLITTER.sub("\\1_\\2", argument['name']).lower()
arguments[argument_name] = Variable.get_native(Variable.get_native_type(variables[argument['variable']]), argument_given)
#
#
else: _return = UpnpException("pas_http_core_400", 401)
result = None
try:
if (is_request_valid):
result = (getattr(self, action_method)(**arguments)
if hasattr(self, action_method) else
Hook.call_one("dNG.pas.upnp.services.{0}.handle.{1}".format(self.__class__.__name__, action_method), **arguments)
)
#
except Exception as handled_exception:
if (self.log_handler is not None): self.log_handler.error(handled_exception, context = "pas_upnp")
result = UpnpException("pas_http_core_500", _exception = handled_exception)
#
if (isinstance(result, Exception)): _return = result
elif (is_request_valid):
return_values = ([ ] if (action_definition['return_variable'] is None) else [ action_definition['return_variable'] ])
return_values += action_definition['result_variables']
return_values_length = len(return_values)
_return = [ ]
is_dict_result = (type(result) is dict)
if (return_values_length > 1 and (not is_dict_result)): _return = UpnpException("pas_http_core_500", value = "Response can not be generated based on the UPnP action result from '{0}'".format(self.udn))
elif (return_values_length > 0):
for return_value in return_values:
return_value_name = AbstractService.RE_CAMEL_CASE_SPLITTER.sub("\\1_\\2", return_value['name']).lower()
if (is_dict_result): result_value = (result[return_value_name] if (return_value_name in result) else None)
else: result_value = result
if (return_value['variable'] not in variables or result_value is None):
_return = UpnpException("pas_http_core_500", value = "Variable '{0}' is not defined for '{1}'".format(return_value['variable'], self.udn))
break
else: _return.append({ "name": return_value['name'], "value": Variable.get_upnp_value(variables[return_value['variable']], result_value) })
#
elif (result is not None): _return = UpnpException("pas_http_core_500", value = "Expected empty response does not correspond to UPnP action result from '{0}'".format(self.udn))
#
return _return
#
def init_host(self, device, service_id, configid = None):
"""
Initializes a host service.
:param device: Host device this UPnP service is added to
:param service_id: Unique UPnP service ID
:param configid: UPnP configId for the host device
:return: (bool) Returns true if initialization was successful.
:since: v0.2.00
"""
self.configid = configid
self.host_service = True
self.service_id = service_id
self.udn = device.get_udn()
self.url_base = "{0}{1}/".format(device.get_url_base(), Link.encode_query_value(service_id))
self.url_control = "{0}control".format(self.url_base)
self.url_event_control = "{0}eventsub".format(self.url_base)
self.url_scpd = "{0}xml".format(self.url_base)
self._init_host_actions(device)
self._init_host_variables(device)
Hook.call("dNG.pas.upnp.Service.initHost", device = device, service = self)
Hook.register_weakref("dNG.pas.upnp.Gena.onRegistered", self._on_gena_registration)
return ((len(self.actions) + len(self.variables)) > 0)
#
def _init_host_actions(self, device):
"""
Initializes the dict of host service actions.
:param device: Host device this UPnP service is added to
:since: v0.2.00
"""
self.actions = { }
#
def _init_host_variables(self, device):
"""
Initializes the dict of host service variables.
:param device: Host device this UPnP service is added to
:since: v0.2.00
"""
self.variables = { }
#
def is_managed(self):
"""
True if the host manages the service.
:return: (bool) False if remote UPnP service
:since: v0.2.00
"""
return self.host_service
#
def _on_gena_registration(self, params, last_return = None):
"""
Called after an UPnP device registered for GENA.
:return: (mixed) Return value
:since: v0.2.00
"""
if (self.host_service
and params.get("usn") == self.get_usn()
and "sid" in params
): self._handle_gena_registration(params['sid'])
return last_return
#
def query_state_variable(self, var_name):
"""
UPnP call for "QueryStateVariable".
:param var_name: Variable to be returned
:return: (mixed) Variable value
:since: v0.2.00
"""
raise NotImplementedException()
#
def remove_host_action(self, action):
"""
Removes the given host service action.
:param action: SOAP action
:since: v0.2.00
"""
if (action in self.actions): del(self.actions[action])
#
def remove_host_variable(self, name, definition):
"""
Removes the given host service variable.
:param name: Variable name
:since: v0.2.00
"""
if (name in self.variables): del(self.variables[name])
#
def set_configid(self, configid):
"""
Sets the UPnP configId value.
:param configid: Current UPnP configId
:since: v0.2.00
"""
self.configid = configid
#
#
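# The usage sketch below is an illustrative addition and not part of the
# original module. It assumes the enclosing class is "AbstractService" (the
# name referenced by handle_soap_call() above); the "GetStatus" action and
# "Status" variable are hypothetical.
#
# class StatusService(AbstractService):
#     def _init_host_actions(self, device):
#         AbstractService._init_host_actions(self, device)
#         self.add_host_action("GetStatus",
#                              return_variable = { "name": "ResultStatus",
#                                                  "variable": "Status"
#                                                })
#     #
#     def _init_host_variables(self, device):
#         AbstractService._init_host_variables(self, device)
#         self.add_host_variable("Status", { "is_sending_events": True,
#                                            "is_multicasting_events": False,
#                                            "type": "boolean",
#                                            "value": "0"
#                                          })
#     #
#     def get_status(self):
#         # Invoked by handle_soap_call() for the SOAP action "GetStatus"
#         return True
#     #
# #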
| dNG-git/pas_upnp | src/dNG/data/upnp/services/abstract_service.py | Python | gpl-2.0 | 20,705 | 0.008211 |
#!/usr/bin/env python
# Takes apart large IATI XML files and outputs one file per reporting org.
# Copyright 2013 Mark Brough.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License v3.0 as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
from lxml import etree
import unicodecsv
import sys
import os
# FIXME: if there are multiple countries/countries+regions, then don't
# output to the same file.
def segment_file(prefix, filename, output_directory):
print "Segmenting file", filename
doc=etree.parse(os.path.join(filename))
extorgs = set(doc.xpath("//iati-activity/reporting-org/@ref"))
print "Found orgs", list(extorgs)
out = {}
iatiactivities = doc.xpath('//iati-activities')[0]
for org in extorgs:
out[org] = {
'title': prefix.upper() + " Activity file " + org,
'data': etree.Element('iati-activities')
}
for attribute, attribute_value in iatiactivities.items():
out[org]['data'].set(attribute, attribute_value)
activities = doc.xpath('//iati-activity')
for activity in activities:
if (activity.xpath("reporting-org/@ref")) and (activity.xpath("reporting-org/@ref")[0] != ""):
org = activity.xpath("reporting-org/@ref")[0]
out[org]['orgname'] = activity.xpath("reporting-org/text()")[0] if activity.xpath("reporting-org/text()") else ""
out[org]['orgtype'] = activity.xpath("reporting-org/@type")[0] if activity.xpath("reporting-org/@type") else ""
out[org]['data'].append(activity)
# Create metadata file...
fieldnames = ['org', 'orgname', 'orgtype', 'official', 'filename', 'url',
'package_name', 'package_title']
metadata_file = open(output_directory + 'metadata.csv', 'w')
metadata = unicodecsv.DictWriter(metadata_file, fieldnames)
metadata.writeheader()
for org, data in out.items():
print "Writing data for", org
# Check not empty
if data['data'].xpath('//iati-activity'):
d = etree.ElementTree(data['data'])
d.write(output_directory+prefix+"-"+org+".xml",
pretty_print=True,
xml_declaration=True,
encoding="UTF-8")
metadata.writerow({
'org':org,
'orgname':data['orgname'],
'orgtype':data['orgtype'],
'filename':prefix+"-"+org+'.xml',
'package_name': prefix+"-"+org,
'package_title': data['title']})
print "Finished writing data, find the files in", output_directory
metadata_file.close()
if __name__ == '__main__':
arguments = sys.argv
arguments.pop(0)
prefix = arguments[0]
arguments.pop(0)
filenames = arguments
output_directory = 'data/'
if not filenames:
print "No filenames"
else:
for filename in filenames:
segment_file(prefix, filename, output_directory)
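# Example invocation (hypothetical file name); the prefix becomes part of the
# generated package names:
#
# python segment_ro.py dfid activities-2013.xml
#
# This writes one file per reporting org (e.g. data/dfid-GB-1.xml) plus
# data/metadata.csv describing each generated package.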
| markbrough/iati-country-tester | segment_ro.py | Python | mit | 3,383 | 0.005912 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#windowController.py
from PyQt4 import QtGui
import sys, multiprocessing
import mainWindow, windowListerner
class QWindowsController(multiprocessing.Process):
def __init__(self, messageBox):
super(QWindowsController, self).__init__()
self.messageBox = messageBox
def run(self):
app = QtGui.QApplication(sys.argv)
QMain = mainWindow.QMainWindow()
        # UI message handling thread
wListerner = windowListerner.QWindowListerner(QMain, self.messageBox)
wListerner.start()
        # Show the main window
QMain.show()
sys.exit(app.exec_())
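# Hypothetical launch sketch, not part of the original module. It assumes the
# message box shared with the listener thread is a multiprocessing.Queue (or
# any object exposing the interface QWindowListerner expects).
#
# if __name__ == '__main__':
#     messageBox = multiprocessing.Queue()
#     controller = QWindowsController(messageBox)
#     controller.start()
#     controller.join()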
| sharmaking/CoIntegrationAnalysis | windowController.py | Python | mit | 579 | 0.03255 |
from birdseye.server import main
if __name__ == '__main__':
main()
| alexmojaki/birdseye | birdseye/__main__.py | Python | mit | 72 | 0 |
# coding: utf-8
"""Test the kernel specs webservice API."""
import errno
import io
import json
import os
import shutil
pjoin = os.path.join
import requests
from IPython.kernel.kernelspec import NATIVE_KERNEL_NAME
from IPython.html.utils import url_path_join
from IPython.html.tests.launchnotebook import NotebookTestBase, assert_http_error
# Copied from IPython.kernel.tests.test_kernelspec so updating that doesn't
# break these tests
sample_kernel_json = {'argv': ['cat', '{connection_file}'],
'display_name': 'Test kernel',
}
some_resource = u"The very model of a modern major general"
class KernelSpecAPI(object):
"""Wrapper for notebook API calls."""
def __init__(self, base_url):
self.base_url = base_url
def _req(self, verb, path, body=None):
response = requests.request(verb,
url_path_join(self.base_url, path),
data=body,
)
response.raise_for_status()
return response
def list(self):
return self._req('GET', 'api/kernelspecs')
def kernel_spec_info(self, name):
return self._req('GET', url_path_join('api/kernelspecs', name))
def kernel_resource(self, name, path):
return self._req('GET', url_path_join('kernelspecs', name, path))
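# Hypothetical standalone use of the wrapper above against an already running
# notebook server (the URL is illustrative; the tests below start their own
# server via NotebookTestBase instead):
#
# ks_api = KernelSpecAPI('http://127.0.0.1:8888/')
# model = ks_api.list().json()
# print(model['default'], sorted(model['kernelspecs']))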
class APITest(NotebookTestBase):
"""Test the kernelspec web service API"""
def setUp(self):
ipydir = self.ipython_dir.name
sample_kernel_dir = pjoin(ipydir, 'kernels', 'sample')
try:
os.makedirs(sample_kernel_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(pjoin(sample_kernel_dir, 'kernel.json'), 'w') as f:
json.dump(sample_kernel_json, f)
with io.open(pjoin(sample_kernel_dir, 'resource.txt'), 'w',
encoding='utf-8') as f:
f.write(some_resource)
self.ks_api = KernelSpecAPI(self.base_url())
def test_list_kernelspecs_bad(self):
"""Can list kernelspecs when one is invalid"""
bad_kernel_dir = pjoin(self.ipython_dir.name, 'kernels', 'bad')
try:
os.makedirs(bad_kernel_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(pjoin(bad_kernel_dir, 'kernel.json'), 'w') as f:
f.write("garbage")
model = self.ks_api.list().json()
assert isinstance(model, dict)
self.assertEqual(model['default'], NATIVE_KERNEL_NAME)
specs = model['kernelspecs']
assert isinstance(specs, dict)
# 2: the sample kernelspec created in setUp, and the native Python
# kernel
self.assertGreaterEqual(len(specs), 2)
shutil.rmtree(bad_kernel_dir)
def test_list_kernelspecs(self):
model = self.ks_api.list().json()
assert isinstance(model, dict)
self.assertEqual(model['default'], NATIVE_KERNEL_NAME)
specs = model['kernelspecs']
assert isinstance(specs, dict)
# 2: the sample kernelspec created in setUp, and the native Python
# kernel
self.assertGreaterEqual(len(specs), 2)
def is_sample_kernelspec(s):
return s['name'] == 'sample' and s['display_name'] == 'Test kernel'
def is_default_kernelspec(s):
return s['name'] == NATIVE_KERNEL_NAME and s['display_name'].startswith("IPython")
assert any(is_sample_kernelspec(s) for s in specs.values()), specs
assert any(is_default_kernelspec(s) for s in specs.values()), specs
def test_get_kernelspec(self):
spec = self.ks_api.kernel_spec_info(
'Sample').json() # Case insensitive
self.assertEqual(spec['display_name'], 'Test kernel')
def test_get_nonexistant_kernelspec(self):
with assert_http_error(404):
self.ks_api.kernel_spec_info('nonexistant')
def test_get_kernel_resource_file(self):
res = self.ks_api.kernel_resource('sAmple', 'resource.txt')
self.assertEqual(res.text, some_resource)
def test_get_nonexistant_resource(self):
with assert_http_error(404):
self.ks_api.kernel_resource('nonexistant', 'resource.txt')
with assert_http_error(404):
self.ks_api.kernel_resource('sample', 'nonexistant.txt')
| mattvonrocketstein/smash | smashlib/ipy3x/html/services/kernelspecs/tests/test_kernelspecs_api.py | Python | mit | 4,428 | 0.001355 |
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/StreamLines/F7 case
# Create Stream Lines for all fields of the the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Directory for saving snapshots
picturedir = get_picture_dir("StreamLines/F7")
# Create presentations
myParavis = paravis.myParavis
file = datadir + "occ4050.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "\nCreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.STREAMLINES], picturedir, pictureext)
| FedoraScientific/salome-paravis | test/VisuPrs/StreamLines/F7.py | Python | lgpl-2.1 | 1,521 | 0.001972 |
from datetime import datetime, timedelta
import dateutil.parser
import discord
from discord.ext import commands
import utils
def not_season_or_year(ctx):
now = datetime.now()
    return AniList.seasons[(now.month - 1) // 3] not in ctx.message.content or str(now.year) not in ctx.message.content
class AniList(utils.SessionCog):
"""Commands that access AniList. Mostly just for seasonal anime."""
daynames = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
seasons = ["winter", "spring", "summer", "fall"]
season_colors = {
'winter': discord.Colour.lighter_grey(),
'spring': discord.Colour.green(),
'summer': discord.Colour.gold(),
'fall': discord.Colour.orange()
}
types = ["tv", "tv short"]
@commands.command()
@utils.cooldown_reset_if(not_season_or_year)
@commands.cooldown(1, 1800, commands.BucketType.channel)
async def anime_list(self, season=None, year=None):
"""Lists anime airing in a given season, or the current season if none is specified.
Can take both year and season because of the rollover into winter season."""
def datestr(da: datetime):
if da is None:
return "Not Listed"
return dateutil.parser.parse(da).strftime("%m/%d/%Y")
token = await self.check_token()
now = datetime.now()
        season = season or self.seasons[(now.month - 1) // 3]
year = year or now.year
days = [[], [], [], [], [], [], []]
m = await self.bot.say("collecting info")
for t in self.types:
params = {"access_token": token, "year": year, "season": season, "type": t}
url = "https://anilist.co/api/browse/anime"
async with self.session.get(url, params=params) as r:
js = await r.json()
if r.status != 200:
await self.bot.edit_message(m, f"error in api call: response {r.status}\n{r.reason}\n{js['error_message']}")
return
for anime in js:
if not anime["adult"]:
url = f"https://anilist.co/api/anime/{anime['id']}"
async with self.session.get(url, params={"access_token": token}) as r2:
anime = await r2.json()
d = dateutil.parser.parse(anime["start_date"])
days[d.weekday()].append(anime)
anilist_url = f'http://anilist.co/browse/anime?sort=start_date-desc&year={year}&season={season}'
e: discord.Embed = discord.Embed(
title=f"{season.title()} {year} Anime",
url=anilist_url,
color=self.season_colors[season]
)
for day, shows in enumerate(days):
shows = sorted(shows, key=lambda a: a['start_date_fuzzy'])
value = [
f"""*{anime['title_romaji']}*
{datestr(anime['start_date'])} — {datestr(anime['end_date'])}
{f"Time until next episode: {utils.down_to_minutes(timedelta(seconds=anime['airing']['countdown']))}"
if anime['airing'] is not None and 'countdown' in anime['airing'] else ''
}
"""
for anime in shows
]
pages = [[]]
for v in value:
if len('\n'.join(pages[-1])) + len(v) < 1024:
pages[-1].append(v)
else:
pages.append([v])
e.add_field(name=self.daynames[day], value='\n'.join(pages[0]), inline=False)
for p in pages[1:]:
e.add_field(name='\N{ZERO WIDTH SPACE}', value='\n'.join(p), inline=False)
await self.bot.delete_message(m)
await self.bot.say(embed=e)
async def check_token(self):
params = {"client_id": utils.tokens['anilist_id'], "client_secret": utils.tokens['anilist_secret'], "grant_type": "client_credentials"}
url = "https://anilist.co/api/auth/access_token"
async with self.session.post(url, params=params) as r:
if r.status != 200:
await self.bot.say(f"error in check_token call: response {r.status}")
return
token = (await r.json())["access_token"]
return token
def setup(bot):
bot.add_cog(AniList(bot))
| Drowrin/Weeabot | cogs/anilist.py | Python | mit | 4,392 | 0.003189 |
"""
Tensor Contraction Engine output parser.
This module provides parsers of the output of the Tensor Contraction Engine of
So Hirata into Tensor objects in drudge.
"""
import collections
import itertools
import re
from sympy import nsimplify, sympify, Symbol
from drudge import Term
#
# The driver function
# -------------------
#
def parse_tce_out(tce_out, range_cb, base_cb):
"""Parse a TCE output into a list of terms.
A list of terms, and a dictionary of free symbols will be returned.
"""
lines = []
for line in tce_out.splitlines():
stripped = line.strip()
if len(stripped) > 0:
lines.append(stripped)
continue
free_vars = collections.defaultdict(set)
return list(itertools.chain.from_iterable(
_parse_tce_line(line, range_cb, base_cb, free_vars)
for line in lines
)), free_vars
#
# Internal functions
# ------------------
#
def _parse_tce_line(line, range_cb, base_cb, free_vars):
"""Parse a TCE output line into a list of terms.
"""
# Get the initial part in the bracket and the actual term specification
# part after it.
match_res = re.match(
r'^\s*\[(?P<factors>.*)\](?P<term>[^\[\]]+)$',
line
)
if match_res is None:
raise ValueError('Invalid TCE output line', line)
factors_str = match_res.group('factors').strip()
term_str = match_res.group('term').strip()
# Get the actual term in its raw form.
raw_term = _parse_term(term_str, range_cb, base_cb, free_vars)
# Generates the actual list of terms based on the factors, possibly with
# permutations.
return _gen_terms(factors_str, raw_term)
#
# Some constants for the TCE output format
#
_SUM_BASE = 'Sum'
#
# Parsing the term specification
#
def _parse_term(term_str, range_cb, base_cb, free_vars):
"""Parse the term string after the square bracket into a Term.
"""
# First break the string into indexed values.
summed_vars, idxed_vals = _break_into_idxed(term_str)
sums = tuple((Symbol(i), range_cb(i)) for i in summed_vars)
dumms = {i[0] for i in sums}
amp = sympify('1')
for base, indices in idxed_vals:
indices_symbs = tuple(Symbol(i) for i in indices)
for i, j in zip(indices_symbs, indices):
if i not in dumms:
free_vars[range_cb(j)].add(i)
continue
base_symb = base_cb(base, indices_symbs)
amp *= base_symb[indices_symbs]
continue
return Term(sums=sums, amp=amp, vecs=())
def _break_into_idxed(term_str):
"""Break the term string into pairs of indexed base and indices.
Both the base and the indices variables are going to be simple strings in
the return value.
"""
# First break it into fields separated by the multiplication asterisk.
fields = (i for i in re.split(r'\s*\*\s*', term_str) if len(i) > 0)
# Parse the fields one-by-one.
idxed_vals = []
for field in fields:
# Break the field into the base part and the indices part.
match_res = re.match(
r'(?P<base>\w+)\s*\((?P<indices>.*)\)', field
)
if match_res is None:
raise ValueError('Invalid indexed value', field)
# Generate the final result.
idxed_vals.append((
match_res.group('base'),
tuple(match_res.group('indices').split())
))
continue
# Summation always comes first in TCE output.
if idxed_vals[0][0] == _SUM_BASE:
return idxed_vals[0][1], idxed_vals[1:]
else:
return (), idxed_vals
#
# Final term generation based on the raw term
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
def _gen_terms(factors_str, raw_term):
"""Generate the actual terms based on the initial factor string.
The raw term should be a term directly parsed from the term specification
part of the TCE line. This function will use the factors string in the
square bracket to turn it into a list of terms for the final value of the
line.
"""
# The regular expression for a factor.
factor_regex = r'\s*'.join([
r'(?P<sign>[+-])',
r'(?P<factor_number>[0-9.]+)',
r'(?:\*\s*P\((?P<perm_from>[^=>]*)=>(?P<perm_to>[^)]*)\))?',
]) + r'\s*'
mismatch_regex = r'.'
regex = '(?P<factor>{})|(?P<mismatch>{})'.format(
factor_regex, mismatch_regex
)
# Iterate over the factors.
terms = []
for match_res in re.finditer(regex, factors_str):
# Test if the result matches a factor.
if match_res.group('factor') is None:
raise ValueError('Invalid factor string', factors_str)
# The value of the factor.
factor_value = nsimplify(''.join(
match_res.group('sign', 'factor_number')
), rational=True)
# Get the substitution for the permutation of the indices.
if match_res.group('perm_from') is not None:
from_vars = match_res.group('perm_from').split()
to_vars = match_res.group('perm_to').split()
subs = {
Symbol(from_var): Symbol(to_var)
for from_var, to_var in zip(from_vars, to_vars)
}
else:
subs = {}
# Add the result.
terms.append(raw_term.subst(subs).scale(factor_value))
# Continue to the next factor.
continue
return terms
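if __name__ == '__main__':
    # Hypothetical self-test, not part of the original module. The input line
    # is an illustrative fragment (not verbatim TCE output) and the callbacks
    # are minimal stand-ins for what a drudge-based driver would supply.
    from sympy import IndexedBase
    from drudge import Range
    holes = Range('h')
    particles = Range('p')
    def _range_cb(index):
        # TCE hole indices conventionally start with 'h', particle ones with 'p'.
        return holes if index.startswith('h') else particles
    def _base_cb(base, indices):
        return IndexedBase(base)
    line = '[ + 1.0 ] Sum ( h1 p2 ) * f ( h1 p2 ) * t ( p2 h1 )'
    terms, free_vars = parse_tce_out(line, _range_cb, _base_cb)
    print(terms)
    print(dict(free_vars))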
| tschijnmo/drudge | drudge/_tceparser.py | Python | mit | 5,439 | 0 |
import serial
from time import sleep
import base64
import sys
def readSerial():
while True:
response = ser.readline();
return response
# main
ser = serial.Serial(port='/dev/ttyACM0', baudrate=115200, timeout=3)
ser.isOpen()
# Wait for the UART Listener VM to be done.
while(1):
message = readSerial()
if 'Listener' in message:
break
# Request the keyCode from the Listener VM
ser.write('\n'.encode())
ser.flush()
#Receive the keyCode
while(1):
message = readSerial()
if 'keyCode' in message:
hex_keyCode = message[9:-1]
break
print "KeyCode: ", hex_keyCode
binary_keyCode = base64.b16decode(hex_keyCode.upper())
while(1):
print "ARM Commands: "
print "1 - Start"
print "2 - Stop"
c = '0'
while c!='1' and c!='2':
c = raw_input('Input:')
print 'Sending the arm command...'
for i in range(0, len(binary_keyCode)):
ser.write(binary_keyCode[i])
ser.flush()
ser.write(c.encode())
ser.write('\n'.encode())
ser.flush()
print 'Board response: %s' % readSerial()
| prplfoundation/prpl-hypervisor | bin/board-control.py | Python | isc | 1,093 | 0.013724 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Slurm workload manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import subprocess
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
class SlurmClusterResolver(ClusterResolver):
"""Cluster Resolver for system with Slurm workload manager.
This is an implementation of cluster resolvers for Slurm clusters. This allows
the specification of jobs and task counts, number of tasks per node, number of
  GPUs on each node and number of GPUs for each task. It retrieves system
  attributes from Slurm environment variables, resolves allocated computing node
  names, constructs a cluster and returns a ClusterResolver object which can be
  used for distributed TensorFlow.
"""
def _resolve_hostnames(self):
"""Resolve host names of nodes allocated in current jobs.
Returns:
A list of node names as strings.
"""
hostlist = (subprocess.check_output(['scontrol', 'show', 'hostname']).
decode('utf-8').strip().split('\n'))
return hostlist
def __init__(self,
jobs,
port_base=8888,
gpus_per_node=1,
gpus_per_task=1,
tasks_per_node=None,
auto_set_gpu=True,
rpc_layer='grpc'):
"""Creates a new SlurmClusterResolver object.
This takes in parameters and creates a SlurmClusterResolver object. It uses
    those parameters to determine on which nodes the processes will reside and
    resolves their hostnames. With the number of GPUs on each node and the number
    of GPUs for each task, it offsets the port number for each process and allocates
GPUs to tasks by setting environment variables. The resolver currently
supports homogeneous tasks and default Slurm process allocation.
Args:
jobs: Dictionary with job names as key and number of tasks in the job as
value
port_base: The first port number to start with for processes on a node.
gpus_per_node: Number of GPUs available on each node.
gpus_per_task: Number of GPUs to be used for each task.
tasks_per_node: Number of tasks to run on each node, if not set defaults
to Slurm's output environment variable SLURM_NTASKS_PER_NODE.
auto_set_gpu: Set the visible CUDA devices automatically while resolving
the cluster by setting CUDA_VISIBLE_DEVICES environment variable.
Defaults to True.
rpc_layer: (Optional) The protocol TensorFlow uses to communicate between
nodes. Defaults to 'grpc'.
Returns:
A ClusterResolver object which can be used with distributed TensorFlow.
Raises:
      RuntimeError: If requested more GPUs per node than available or requested
        more tasks than assigned tasks.
"""
# check if launched by mpirun
if 'OMPI_COMM_WORLD_RANK' in os.environ:
self._rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
num_tasks = int(os.environ['OMPI_COMM_WORLD_SIZE'])
else:
self._rank = int(os.environ['SLURM_PROCID'])
num_tasks = int(os.environ['SLURM_NTASKS'])
self._jobs = collections.OrderedDict(sorted(jobs.items()))
self._port_base = port_base
# user specification overrides SLURM specification
if tasks_per_node is not None:
self._tasks_per_node = tasks_per_node
elif tasks_per_node is None and 'SLURM_NTASKS_PER_NODE' in os.environ:
self._tasks_per_node = int(os.environ['SLURM_NTASKS_PER_NODE'])
else:
raise RuntimeError('Neither `tasks_per_node` or '
'SLURM_NTASKS_PER_NODE is set.')
self._gpus_per_node = gpus_per_node
self._gpus_per_task = gpus_per_task
self._auto_set_gpu = auto_set_gpu
self.task_type = None
self.task_index = None
self.rpc_layer = rpc_layer
self._gpu_allocation = []
self._cluster_allocation = {}
if self._tasks_per_node * self._gpus_per_task > self._gpus_per_node:
      raise RuntimeError('Requested more GPUs per node than available.')
if sum(self._jobs.values()) != num_tasks:
      raise RuntimeError('Requested more tasks than assigned tasks.')
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified initialization parameters and Slurm environment variables. The
cluster specification is resolved each time this function is called. The
    resolver extracts hostnames of nodes using scontrol and packs tasks in that
    order until a node has a number of tasks equal to the specification.
    GPUs on nodes are allocated to tasks according to the specification by
    setting the CUDA_VISIBLE_DEVICES environment variable.
Returns:
A ClusterSpec containing host information retrieved from Slurm's
environment variables.
"""
hostlist = self._resolve_hostnames()
task_list = []
self._gpu_allocation = []
self._cluster_allocation = {}
for host in hostlist:
for port_offset, gpu_offset in zip(
range(self._tasks_per_node),
range(0, self._gpus_per_node, self._gpus_per_task)):
host_addr = '%s:%d' % (host, self._port_base + port_offset)
task_list.append(host_addr)
gpu_id_list = []
for gpu_id in range(gpu_offset, gpu_offset + self._gpus_per_task):
gpu_id_list.append(str(gpu_id))
self._gpu_allocation.append(','.join(gpu_id_list))
cluster_rank_offset_start = 0
cluster_rank_offset_end = 0
for task_type, num_tasks in self._jobs.items():
cluster_rank_offset_end = cluster_rank_offset_start + num_tasks
self._cluster_allocation[task_type] = (
task_list[cluster_rank_offset_start:cluster_rank_offset_end])
if cluster_rank_offset_start <= self._rank < cluster_rank_offset_end:
self.task_type = task_type
self.task_index = self._rank - cluster_rank_offset_start
cluster_rank_offset_start = cluster_rank_offset_end
if self._auto_set_gpu is True:
os.environ['CUDA_VISIBLE_DEVICES'] = self._gpu_allocation[self._rank]
return ClusterSpec(self._cluster_allocation)
def get_task_info(self):
"""Returns job name and task_index for the process which calls this.
This returns the job name and task index for the process which calls this
function according to its rank and cluster specification. The job name and
task index are set after a cluster is constructed by cluster_spec otherwise
defaults to None.
Returns:
      A string specifying the job name the process belongs to and an integer
      specifying the task index the process belongs to in that job.
"""
return self.task_type, self.task_index
def master(self, task_type=None, task_index=None, rpc_layer=None):
"""Returns the master string for connecting to a TensorFlow master.
Args:
task_type: (Optional) Overrides the default auto-selected task type.
      task_index: (Optional) Overrides the default auto-selected task index.
rpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses
to communicate across nodes.
Returns:
A connection string for connecting to a TensorFlow master.
"""
task_type = task_type if task_type is not None else self.task_type
task_index = task_index if task_index is not None else self.task_index
rpc_layer = rpc_layer or self.rpc_layer
master = self.cluster_spec().task_address(task_type, task_index)
return '%s://%s' % (rpc_layer, master) if rpc_layer else master
@property
def environment(self):
"""Returns the current environment which TensorFlow is running in.
For users in the Slurm environment, the environment property is always an
empty string, and Google users will not use this ClusterResolver for running
on internal systems.
"""
return ''
def num_accelerators_per_worker(self, session_config=None):
del session_config # Unused, since this is set in __init__ manually.
return self._gpus_per_node
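# Hypothetical usage sketch, not part of the original module. It has to run
# inside a Slurm allocation (SLURM_PROCID, SLURM_NTASKS etc. are read from the
# environment at construction time); the job names and counts are illustrative.
#
# cluster_resolver = SlurmClusterResolver(
#     jobs={'ps': 1, 'worker': 2},
#     port_base=8888,
#     gpus_per_node=2,
#     gpus_per_task=1,
#     tasks_per_node=1)
# cluster_spec = cluster_resolver.cluster_spec()
# task_type, task_index = cluster_resolver.get_task_info()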
| hehongliang/tensorflow | tensorflow/contrib/cluster_resolver/python/training/slurm_cluster_resolver.py | Python | apache-2.0 | 8,923 | 0.003474 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
class ServiceFilterAction(tables.FilterAction):
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
if q in service.type.lower():
return True
return False
return filter(comp, services)
def get_stats(service):
return template.loader.render_to_string('admin/services/_stats.html',
{'service': service})
def get_enabled(service, reverse=False):
options = ["Enabled", "Disabled"]
if reverse:
options.reverse()
# if not configured in this region, neither option makes sense
if service.host:
return options[0] if not service.disabled else options[1]
return None
class ServicesTable(tables.DataTable):
id = tables.Column('id', verbose_name=_('Id'), hidden=True)
name = tables.Column("name", verbose_name=_('Name'))
service_type = tables.Column('__unicode__', verbose_name=_('Service'))
host = tables.Column('host', verbose_name=_('Host'))
enabled = tables.Column(get_enabled,
verbose_name=_('Enabled'),
status=True)
class Meta:
name = "services"
verbose_name = _("Services")
table_actions = (ServiceFilterAction,)
multi_select = False
status_columns = ["enabled"]
| kickstandproject/wildcard | wildcard/dashboards/admin/info/tables.py | Python | apache-2.0 | 2,073 | 0 |
from ogcserver.cgiserver import Handler
from jon import fcgi
class OGCServerHandler(Handler):
configpath = '/path/to/ogcserver.conf'
fcgi.Server({fcgi.FCGI_RESPONDER: OGCServerHandler}).run()
| pbabik/OGCServer | conf/fcgi_app.py | Python | bsd-3-clause | 198 | 0.010101 |
#!/usr/bin/env python
"""
Test module for demo.py.
Runs various tests on the demo module. Simply run this module to test
the demo.py module.
"""
import test
import demo
def test_echo():
print("In echo test")
echo = demo.echo("hej")
test.assert_equal("hej", echo)
test.assert_not_equal(None, echo)
def test_add():
print("In add test")
added = demo.add("hej ", "hopp")
test.assert_equal("hej hopp", added)
test.assert_not_equal("hej", added)
def run_module_tests():
test.run_tests([test_echo,
test_add])
if __name__ == "__main__":
run_module_tests()
| diblaze/TDP002 | 2.3/testdemo/test/demo_test.py | Python | mit | 614 | 0.006515 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from numpy.testing import (run_module_suite, assert_almost_equal,
assert_allclose)
import pywt
def test_centrfreq():
# db1 is Haar function, frequency=1
w = pywt.Wavelet('db1')
expected = 1
result = pywt.centfrq(w, precision=12)
assert_almost_equal(result, expected, decimal=3)
# db2, frequency=2/3
w = pywt.Wavelet('db2')
expected = 2/3.
result = pywt.centfrq(w, precision=12)
assert_almost_equal(result, expected)
def test_scal2frq_scale():
scale = 2
delta = 1
w = pywt.Wavelet('db1')
expected = 1. / scale
result = pywt.scal2frq(w, scale, delta, precision=12)
assert_almost_equal(result, expected, decimal=3)
def test_scal2frq_delta():
scale = 1
delta = 2
w = pywt.Wavelet('db1')
expected = 1. / delta
result = pywt.scal2frq(w, scale, delta, precision=12)
assert_almost_equal(result, expected, decimal=3)
def test_intwave_orthogonal():
w = pywt.Wavelet('db1')
int_psi, x = pywt.intwave(w, precision=12)
ix = x < 0.5
# For x < 0.5, the integral is equal to x
assert_allclose(int_psi[ix], x[ix])
# For x > 0.5, the integral is equal to (1 - x)
# Ignore last point here, there x > 1 and something goes wrong
assert_allclose(int_psi[~ix][:-1], 1 - x[~ix][:-1], atol=1e-10)
if __name__ == '__main__':
run_module_suite()
| ThomasA/pywt | pywt/tests/test_functions.py | Python | mit | 1,468 | 0 |
# Part of Mead. See LICENSE file for full copyright and licensing details.
import datetime, re
def datetime_convert(time):
"""
    Convert time to YYYY-MM-DD HH:MM:SS
"""
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
if len(_list) >= 6:
year = int(_list[0])
mounth = int(_list[1])
day = int(_list[2])
hour = int(_list[3])
minute = int(_list[4])
second = int(_list[5])
time = datetime.datetime(year, mounth, day, hour, minute, second)
return time
else:
try:
hour = int(_list[0])
minute = int(_list[1])
second = int(_list[2])
time = datetime.datetime(100, 1, 1, hour, minute, second)
return time
except IndexError:
hour = int(_list[0])
minute = int(_list[1])
            time = datetime.datetime(100, 1, 1, hour, minute)
return time
def date_convert(date):
"""
Convert date to YYYY-MM-DD
"""
_date = str(date)
redate = re.compile(r'\W+')
_list = redate.split(_date)
try:
day = int(_list[0])
mounth = int(_list[1])
year = int(_list[2])
date = datetime.date(year, mounth, day)
return date
except ValueError:
day = int(_list[2])
mounth = int(_list[1])
year = int(_list[0])
date = datetime.date(year, mounth, day)
return date
def time_convert(time):
"""
Convert time to HH:MM:SS
"""
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
try:
hour = int(_list[0])
minute = int(_list[1])
second = int(_list[2])
time = datetime.time(hour, minute, second)
return time
except IndexError:
hour = int(_list[0])
minute = int(_list[1])
time = datetime.time(hour, minute)
return time
def convert_in_second(time):
if time:
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
try:
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
second = int(_list[2])
time = hour + minute + second
return time
except IndexError:
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
time = hour + minute
return time
else:
time = 0
return time
def add_time(time, retard):
"""
    Add the given delay (retard, HH:MM) to the given time
"""
time = datetime_convert(time)
if retard:
_time = str(retard)
retime = re.compile(r'\W+')
_list = retime.split(_time)
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
time2 = hour + minute
new_time = time + datetime.timedelta(0, time2)
else:
new_time = time
return new_time.time()
def remove_time(time, retard):
time = datetime_convert(time)
if retard:
_time = str(retard)
retime = re.compile(r'\W+')
_list = retime.split(_time)
hour = int(_list[0]) * 3600
minute = int(_list[1]) * 60
time2 = hour + minute
new_time = time - datetime.timedelta(0, time2)
else:
new_time = time
return new_time.time()
def format_date(date, format=None):
"""
Format date
"""
newdate = date.strftime(format)
return newdate
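if __name__ == '__main__':
    # Hypothetical quick check, not part of the original module; the sample
    # values are illustrative.
    print(datetime_convert('2017-05-04 10:30:00')) # 2017-05-04 10:30:00
    print(date_convert('04/05/2017')) # 2017-05-04 (day/month/year input)
    print(time_convert('10:30')) # 10:30:00
    print(add_time('10:30:00', '01:15')) # 11:45:00
    print(convert_in_second('01:30:00')) # 5400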
| kennethreitz-archive/mead | mead/core/tools/date_time.py | Python | isc | 3,456 | 0.000289 |
import ctypes
import json
def post_analyzer(HKEY_hKey,
LPCTSTR_lpSubKey,
DWORD_ulOptions,
REGSAM_samDesired,
PHKEY_phkResult,
**kwargs):
lpSubKey = ctypes.c_char_p.from_address(LPCTSTR_lpSubKey)
hKey = ctypes.c_void_p.from_address(HKEY_hKey)
res = []
if (lpSubKey and lpSubKey.value):
result = {'name': 'lpSubKey', 'data': lpSubKey.value}
res.append(result)
if hKey and hKey.value:
result = {}
result['name'] = 'hKey'
if hKey.value == 0x80000000:
result['data'] = 'HKCR'
elif hKey.value == 0x80000001:
result['data'] = 'HKCU'
elif hKey.value == 0x80000002:
result['data'] = 'HKLM'
elif hKey.value == 0x80000003:
result['data'] = 'HKU'
elif hKey.value == 0x80000005:
result['data'] = 'HKCC'
else:
result['data'] = '0x%x' % hKey.value
res.append(result)
return json.dumps(res)
| 0xPhoeniX/MazeWalker | MazeTracer/PyScripts/post_regopenkeyexa.py | Python | lgpl-3.0 | 1,056 | 0.001894 |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ClusterRoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'kind': 'str',
'api_version': 'str',
'metadata': 'UnversionedListMeta',
'items': 'list[V1ClusterRole]'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'kind': 'kind',
'api_version': 'apiVersion',
'metadata': 'metadata',
'items': 'items'
}
def __init__(self, kind=None, api_version=None, metadata=None, items=None):
"""
V1ClusterRoleList - a model defined in Swagger
"""
self._kind = kind
self._api_version = api_version
self._metadata = metadata
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1ClusterRoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1ClusterRoleList.
:type: str
"""
self._kind = kind
@property
def api_version(self):
"""
Gets the api_version of this V1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources
:return: The api_version of this V1ClusterRoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1ClusterRoleList.
:type: str
"""
self._api_version = api_version
@property
def metadata(self):
"""
Gets the metadata of this V1ClusterRoleList.
Standard object's metadata.
:return: The metadata of this V1ClusterRoleList.
:rtype: UnversionedListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1ClusterRoleList.
Standard object's metadata.
:param metadata: The metadata of this V1ClusterRoleList.
:type: UnversionedListMeta
"""
self._metadata = metadata
@property
def items(self):
"""
Gets the items of this V1ClusterRoleList.
Items is a list of ClusterRoles
:return: The items of this V1ClusterRoleList.
:rtype: list[V1ClusterRole]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1ClusterRoleList.
Items is a list of ClusterRoles
:param items: The items of this V1ClusterRoleList.
:type: list[V1ClusterRole]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1ClusterRoleList.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
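if __name__ == '__main__':
    # Hypothetical quick check, not part of the generated model; the field
    # values are illustrative.
    role_list = V1ClusterRoleList(kind='ClusterRoleList', api_version='v1', items=[])
    print(role_list.to_dict())
    print(role_list.to_str())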
| detiber/lib_openshift | lib_openshift/models/v1_cluster_role_list.py | Python | apache-2.0 | 6,290 | 0.001272 |
import shelve
db = shelve.open('class-shelve')
for key in db:
    print(key, '=>\n ', db[key].name, db[key].pay)
bob = db['bob']
print(bob.lastName())
print(db['tom'].lastName())
| ViMiao/PythonLearning | ProgrammingPython/C01/dump_db_classes.py | Python | gpl-3.0 | 177 | 0.00565 |
from setuptools import setup, find_packages
setup(
name = "pymp",
version = "0.1",
url = 'http://www.fort-awesome.net/wiki/pymp',
license = 'MIT',
description = "A very specific case when Python's multiprocessing library doesn't work",
author = 'Erik Karulf',
# Below this line is tasty Kool-Aide provided by the Cargo Cult
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools'],
)
| ekarulf/pymp | setup.py | Python | mit | 461 | 0.043384 |
# --------------------------------------------------------
# test_socket.py: Unit tests for socket, select.
# --------------------------------------------------------
# Make sure we can import i2p
import sys; sys.path += ['../../']
import traceback, time, thread, threading, random, copy
from i2p import socket, select
def test_passed(s, msg='OK'):
"""Notify user that the given unit test passed."""
print ' ' + (s + ':').ljust(50) + msg
def verify_html(s):
"""Raise an error if s does not end with </html>"""
assert s.strip().lower()[-7:] == '</html>'
def resolve_test(name='duck.i2p'):
"""Unit test for resolve."""
try:
rname = socket.resolve(name)
except:
print 'Unit test failed for socket.resolve'
traceback.print_exc(); sys.exit()
test_passed('socket.resolve', 'See below')
print ' Use hosts.txt to verify that ' + name + '=' + \
rname[:15] + '...'
def stream_client(dest):
"""Sub-unit test for socket.socket in SOCK_STREAM mode."""
S = socket.socket('Alice', socket.SOCK_STREAM)
S.connect(dest)
S.send('GET / HTTP/1.0\r\n\r\n') # Send request
f = S.makefile() # File object
while True: # Read header
line = f.readline().strip() # Read a line
if line == '': break # Content begins
s = f.read() # Get content
f.close()
S.close()
def stream_client_test():
"""Unit test for socket.socket in SOCK_STREAM mode."""
url = 'duck.i2p'
stream_client('http://' + url + '/')
stream_client(url)
stream_client(url + '/')
stream_client('http://' + url)
stream_client(socket.resolve('http://' + url + '/'))
test_passed('socket.socket stream client')
def packet_test(raw=True):
"""Unit test for socket.socket in SOCK_DGRAM or SOCK_RAW modes."""
try:
multithread_wait_time = 500.0
may_need_increase = False
kwargs = {'in_depth': 0, 'out_depth': 0}
if raw:
C = socket.socket('Carola', socket.SOCK_RAW, **kwargs)
D = socket.socket('Davey', socket.SOCK_RAW, **kwargs)
else:
C = socket.socket('Carol', socket.SOCK_DGRAM, **kwargs)
D = socket.socket('Dave', socket.SOCK_DGRAM, **kwargs)
global C_recv, D_recv, C_got, D_got, __lock
C_recv = [] # Packets C *should* receive
D_recv = [] # Packets D *should* receive
C_got = [] # Packets C actually got
D_got = [] # Packets D actually got
n = 50 # Create n threads
m = 40 # Each thread sends m packets
global __done_count
__done_count = 0
__lock = threading.Lock()
# Use C and D to send and read in many different threads.
def f():
# This code is run in each separate thread
global C_recv, D_recv, C_got, D_got, __lock, __done_count
for i in range(m):
# Random binary string of length 2-80.
index_list = range(random.randrange(2, 80))
s = ''.join([chr(random.randrange(256)) for j in index_list])
if random.randrange(2) == 0:
# Send packet from C to D, and log it.
C.sendto(s, 0, D.dest)
__lock.acquire()
D_recv += [s]
__lock.release()
else:
# Send packet from D to C, and log it.
D.sendto(s, 0, C.dest)
__lock.acquire()
C_recv += [s]
__lock.release()
time.sleep(0.01*random.uniform(0.0,1.0))
# Read any available packets.
try: (p, fromaddr) = C.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == D.dest
__lock.acquire()
if p != None: C_got += [p]
__lock.release()
try: (p, fromaddr) = D.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == C.dest
__lock.acquire()
if p != None: D_got += [p]
__lock.release()
__lock.acquire()
__done_count += 1
__lock.release()
# Create n threads.
for i in range(n):
threading.Thread(target=f).start()
# Wait for them to finish.
while __done_count < n: time.sleep(0.01)
# Read any left-over received packets.
end_time = time.time() + multithread_wait_time
while time.time() < end_time:
# Read any available packets.
try: (p, fromaddr) = C.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == D.dest
if p != None: C_got += [p]
try: (p, fromaddr) = D.recvfrom(1000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None and not raw: assert fromaddr == C.dest
if p != None: D_got += [p]
if len(C_got) == len(C_recv) and len(D_got) == len(D_recv):
break
if time.time() >= end_time:
may_need_increase = True
C_got.sort()
D_got.sort()
C_recv.sort()
D_recv.sort()
assert C_got == C_recv
assert D_got == D_recv
C.close()
D.close()
except:
if raw:
print 'Unit test failed for socket.socket (SOCK_RAW).'
print 'Raw packets are not reliable.'
else:
print 'Unit test failed for socket.socket (SOCK_DGRAM).'
print 'Datagram packets are not reliable.'
if may_need_increase:
print 'Try increasing multithread_wait_time.'
traceback.print_exc(); sys.exit()
if raw:
test_passed('socket.socket (SOCK_RAW)')
else:
test_passed('socket.socket (SOCK_DGRAM)')
def stream_test():
"""Multithreaded unit test for socket.socket (SOCK_STREAM)."""
try:
multithread_wait_time = 200.0
may_need_increase = False
kwargs = {'in_depth':0, 'out_depth':0}
C = socket.socket('Carolic', socket.SOCK_STREAM, **kwargs)
D = socket.socket('David', socket.SOCK_STREAM, **kwargs)
Cout = socket.socket('Carolic', socket.SOCK_STREAM, **kwargs)
Dout = socket.socket('David', socket.SOCK_STREAM, **kwargs)
assert C.dest == Cout.dest
assert D.dest == Dout.dest
C.listen(5)
D.listen(5)
Cout.connect(D.dest)
Dout.connect(C.dest)
(Cin, ignoredest) = C.accept()
(Din, ignoredest) = D.accept()
global C_recv, D_recv, C_got, D_got, __lock
C_recv = [] # String data C *should* receive
D_recv = [] # String data D *should* receive
C_got = [] # String data C actually got
D_got = [] # String data D actually got
n = 50 # Create n threads
m = 40 # Each thread sends m strings
global __done_count
__done_count = 0
__lock = threading.Lock()
# Use C and D to send and read in many different threads.
def f():
# This code is run in each separate thread
global C_recv, D_recv, C_got, D_got, __lock, __done_count
for i in range(m):
# Random binary string of length 2-80.
index_list = range(random.randrange(2, 80))
s = ''.join([chr(random.randrange(256)) for j in index_list])
if random.randrange(2) == 0:
# Send packet from C to D, and log it.
__lock.acquire()
Cout.send(s)
D_recv += [s]
__lock.release()
else:
# Send packet from D to C, and log it.
__lock.acquire()
Dout.send(s)
C_recv += [s]
__lock.release()
time.sleep(0.01*random.uniform(0.0,1.0))
# Read any available string data, non-blocking.
__lock.acquire()
try: p = Cin.recv(100000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None: C_got += [p]
__lock.release()
__lock.acquire()
try: p = Din.recv(100000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None: D_got += [p]
__lock.release()
__lock.acquire()
__done_count += 1
__lock.release()
# Create n threads.
for i in range(n):
threading.Thread(target=f).start()
# Wait for them to finish.
while __done_count < n: time.sleep(0.01)
# Read any left-over received string data.
end_time = time.time() + multithread_wait_time
while time.time() < end_time:
# Read any available string data, non-blocking.
try: p = Cin.recv(100000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None: C_got += [p]
try: p = Din.recv(100000, socket.MSG_DONTWAIT)
except socket.BlockError: p = None
if p != None: D_got += [p]
if len(''.join(C_got)) == len(''.join(C_recv)) and \
len(''.join(D_got)) == len(''.join(D_recv)):
break
if time.time() >= end_time:
may_need_increase = True
C_got = ''.join(C_got)
D_got = ''.join(D_got)
C_recv = ''.join(C_recv)
D_recv = ''.join(D_recv)
assert C_got == C_recv
assert D_got == D_recv
Cin.close()
Din.close()
Cout.close()
Dout.close()
C.close()
D.close()
except:
print 'Unit test failed for socket.socket ' + \
'(SOCK_STREAM, multithreaded).'
if may_need_increase:
print 'Try increasing multithread_wait_time.'
traceback.print_exc(); sys.exit()
test_passed('socket.socket (SOCK_STREAM, multithreaded)')
def noblock_stream_test():
"""Unit test for non-blocking stream commands and listen."""
kwargs = {'in_depth': 0, 'out_depth': 0}
serv = socket.socket('Allison',socket.SOCK_STREAM,**kwargs)
serv.setblocking(False)
serv.listen(100)
assert serv.gettimeout() == 0.0
msg_to_client = 'Hi, client!!!!'
msg_to_server = 'Hi, server!'
nconnects = 5
global server_done, client_count, client_lock
server_done = False
client_count = 0
client_lock = threading.Lock()
def serv_func(n = nconnects):
while True:
try:
(C, ignoredest) = serv.accept()
C.send(msg_to_client)
rmsg = C.recv(len(msg_to_server), socket.MSG_WAITALL)
if rmsg != msg_to_server:
raise ValueError('message should have been: ' +
repr(msg_to_server) + ' was: ' + repr(rmsg))
C.close()
n -= 1
if n == 0: break
except socket.BlockError:
pass
time.sleep(0.01)
global server_done
server_done = True
def client_func():
# FIXME: i2p.socket.NetworkError('TIMEOUT', '') errors are produced
# for our streams if we use '' for all clients. Why?
C = socket.socket('Bobb', socket.SOCK_STREAM, **kwargs)
C.setblocking(False)
try:
C.connect(serv.dest)
except socket.BlockError:
# One could also use timeout=0.1 and loop
(Rlist, Wlist, Elist) = select.select([C], [C], [C])
if len(Elist) > 0:
assert Elist[0] == C
raise Elist[0].sessobj.err
C.send(msg_to_server)
C.setblocking(True)
rmsg = C.recv(len(msg_to_client), socket.MSG_WAITALL)
if rmsg != msg_to_client:
raise ValueError('message should have been: ' +
repr(msg_to_client) + ' was: ' + repr(rmsg))
C.close()
global client_count, client_lock
# Synchronized
client_lock.acquire()
try: client_count += 1
finally: client_lock.release()
thread.start_new_thread(serv_func, ())
for i in range(nconnects):
thread.start_new_thread(client_func, ())
while True:
if server_done and client_count == nconnects: break
time.sleep(0.01)
test_passed('socket.listen (SOCK_STREAM), and non-blocking IO')
def multi_stream_test(n):
"""See if we can have n streams open at once."""
server = None
client = [None] * n
kwargs = {'in_depth': 0, 'out_depth': 0}
server = socket.socket('Aligi',socket.SOCK_STREAM,**kwargs)
server.listen(n)
for i in range(n):
client[i] = socket.socket('Bobo', socket.SOCK_STREAM, \
in_depth=0, out_depth=0)
for i in range(n):
client[i].connect(server.dest)
client[i].send('Hi')
for i in range(n):
client[i].close()
server.close()
test_passed(str(n) + ' streams open at once')
# Todo:
# select, poll
# More nonblocking unit tests
def test():
print 'Testing:'
print "Comment and uncomment tests manually, if they don't finish."
resolve_test()
noblock_stream_test()
stream_client_test()
packet_test(raw=True)
packet_test(raw=False)
stream_test()
multi_stream_test(200)
if __name__ == '__main__':
test()
| metamarcdw/PyBitmessage-I2P | src/i2p/test/test_socket.py | Python | mit | 12,449 | 0.019038 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsagesOperations(object):
"""UsagesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01"
self.config = config
def list(
self, location, custom_headers=None, raw=False, **operation_config):
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Usage
:rtype:
~azure.mgmt.network.v2017_11_01.models.UsagePaged[~azure.mgmt.network.v2017_11_01.models.Usage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
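# Illustrative usage sketch (not part of the generated client; the client
# construction and the Usage attribute names below are assumptions for this
# API version):
#
#     network_client = NetworkManagementClient(credentials, subscription_id)
#     for usage in network_client.usages.list('westus'):
#         print(usage.name.value, usage.current_value, usage.limit)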
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/usages_operations.py | Python | mit | 4,282 | 0.002102 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0012_article_organisations'),
]
operations = [
migrations.AlterField(
model_name='article',
name='organisations',
field=models.ManyToManyField(default=None, help_text='Organisation/organisationer som artikeln hör till',
blank=True, to='organisations.Organisation', verbose_name='organisationer'),
),
]
| I-sektionen/i-portalen | wsgi/iportalen_django/articles/migrations/0013_auto_20151021_0155.py | Python | mit | 598 | 0.00335 |
# lpbm/exceptions.py - All the errors that can be raised in the program.
# Author: Franck Michea < franck.michea@gmail.com >
# License: New BSD License (See LICENSE)
class GeneralOptionError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
msg = 'Could not find or call any function for `--{}` options.'
return msg.format(self.name)
class IdOptionError(GeneralOptionError):
pass
class IdOptionMissingError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
        return 'Id must be specified when using option `--{}`.'.format(self.name)
class ObjectNotFound(Exception):
def __init__(self, id, name):
self.id, self.name = id, name
def __str__(self):
return 'There is no {} with this id ({}).'.format(self.name, self.id)
# Field Errors
class FieldReadOnlyError(Exception):
def __str__(self):
return 'Cannot assign read-only value.'
class FieldRequiredError(Exception):
def __str__(self):
return 'Field is required and cannot be set to empty value None.'
class ConfigOptionArgsError(Exception):
def __str__(self):
msgs = [
'ConfigOptionField.__init__ takes one or two arguments.',
'See documentation for more details.',
]
return ' '.join(msgs)
# Model Errors
class AttributeNotAFieldError(Exception):
def __init__(self, attr_name):
self.attr_name = attr_name
def __str__(self):
msg = 'Attribute `{attr_name}` is not a field. You must implement '
msg += '`interactive_{attr_name}` if you want it to be interactive.'
return msg.format(attr_name=self.attr_name)
class ModelDoesNotExistError(Exception):
def __init__(self, object_name, id):
self.object_name, self.id = object_name, id
def __str__(self):
return 'There is no such {object_name} (id = {id}).'.format(
object_name=self.object_name, id=self.id
)
| fmichea/lpbm | lpbm/exceptions.py | Python | bsd-3-clause | 2,013 | 0.000497 |
"""
.. currentmodule:: pylayers.antprop.diff
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
import numpy as np
import scipy.special as sps
import matplotlib.pyplot as plt
import pdb
def diff(fGHz,phi0,phi,si,sd,N,mat0,matN,beta=np.pi/2):
""" Luebbers Diffration coefficient
Parameters
----------
fGHz
phi0
phi
si
sd
N
mat0
matN
beta : float
skew incidence angle (rad)
Examples
--------
>>> import numpy as np
>>> from pylayers.antprop.slab import *
>>> fGHz = 3.
>>> N = 320/180.
>>> #phi = 40*np.pi/180.
>>> phi0 = np.linspace(0,N*np.pi,500)
>>> phi = np.linspace(0,3*np.pi/2,100)
>>> dm = MatDB()
>>> mat0 = dm['WOOD']
>>> matN = dm['WOOD']
>>> si = 1
>>> sd = 1
    >>> Ds,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN)
"""
if not isinstance(fGHz,np.ndarray):
fGHz = np.array([fGHz])
if not isinstance(phi0,np.ndarray):
phi0 = np.array([phi0])
if not isinstance(phi,np.ndarray):
phi = np.array([phi])
if not isinstance(si,np.ndarray):
si = np.array([si])
if not isinstance(sd,np.ndarray):
sd = np.array([sd])
if not isinstance(N,np.ndarray):
N = np.array([N])
if not isinstance(beta,np.ndarray):
beta = np.array([beta])
fGHz = fGHz[:,None,None,None,None,None,None]
phi0 = phi0[None,:,None,None,None,None,None]
phi = phi[None,None,:,None,None,None,None]
si = si[None,None,None,:,None,None,None]
sd = sd[None,None,None,None,:,None,None]
N = N[None,None,None,None,None,:,None]
beta = beta[None,None,None,None,None,None,:]
L = si*sd/(si+sd)
k = 2*np.pi*fGHz/0.3
#--------------------------------------------------
# R on faces 'o' and 'n'
#--------------------------------------------------
tho = np.empty((phi0.shape[1],phi.shape[2],N.shape[5]))[None,:,:,None,None,:,None]
thn = np.empty((phi0.shape[1],phi.shape[2],N.shape[5]))[None,:,:,None,None,:,None]
PHI0 = phi0 * np.ones(phi.shape)*np.ones(N.shape)
PHI = np.ones(phi0.shape)*phi*np.ones(N.shape)
BN = np.ones(phi0.shape)*np.ones(phi.shape)*N
c1 = PHI>PHI0
c2 = ~c1
tho[c1] = PHI0[c1]
thn[c1] = BN[c1]*np.pi-PHI[c1]
tho[c2] = PHI[c2]
thn[c2] = BN[c2]*np.pi-PHI0[c2]
er0 = np.real(mat0['epr'])
err0 = np.imag(mat0['epr'])
ur0 = np.real(mat0['mur'])
urr0 = np.imag(mat0['mur'])
sigma0 = mat0['sigma']
deltah0 = mat0['roughness']
erN = np.real(matN['epr'])
errN = np.imag(matN['epr'])
    urN = np.real(matN['mur'])
    urrN = np.imag(matN['mur'])
sigmaN = matN['sigma']
deltahN = matN['roughness']
Rsofto,Rhardo = R(tho,k,er0,err0,sigma0,ur0,urr0,deltah0)
Rsoftn,Rhardn = R(thn,k,erN,errN,sigmaN,urN,urrN,deltahN)
#--------------------------------------------------
    # grazing angles Go and Gn
#--------------------------------------------------
Gsofto,Gsoftn = G(N,phi0,Rsofto,Rsoftn)
Ghardo,Ghardn = G(N,phi0,Rhardo,Rhardn)
#--------------------------------------------------
    # compute the 4 terms of the diffraction coefficient
#--------------------------------------------------
sign = 1.0
D1 = Dfunc(sign,k,N,phi-phi0,si,sd,beta)
sign = -1.0
D2 = Dfunc(sign,k,N,phi-phi0,si,sd,beta)
sign = +1.0
D3 = Dfunc(sign,k,N,phi+phi0,si,sd,beta)
sign = -1.0
D4 = Dfunc(sign,k,N,phi+phi0,si,sd,beta)
#--------------------------------------
#n>=1 : exterior wedge
#--------------------------------------
Dsoft =np.empty(np.shape(D1),dtype=complex)
Dhard =np.empty(np.shape(D1),dtype=complex)
#c1 = BN>=1.0
Dsoft = D1+D2+Rsoftn*D3+Rsofto*D4
Dhard = D1+D2+Rhardn*D3+Rhardo*D4
# Dsoft = D2-D4
# Dhard = D2+D4
#Dsoft = D1+D2-D3-D4
#Dhard = D1+D2+D3+D4
# Dsoft = Gsoftn*(D1+Rsoftn*D3)+Gsofto*(D2+Rsofto*D4)
# Dhard = Ghardn*(D1+Rhardn*D3)+Ghardo*(D2+Rhardo*D4)
# c1 = abs(Gsoftn+1.0) < 1e-6
# c2 = abs(Gsofto+1.0) < 1e-6
# c3 = abs(Ghardn+1.0) < 1e-6
# c4 = abs(Ghardo+1.0) < 1e-6
#
# Dsoft[c1]= 0.5*(D1[c1]+D3[c1])+Gsofto[c1]*(D2[c1]+Rsofto[c1]*D4[c1])
# Dsoft[c2]= Gsoftn[c2]*(D1[c2]+Rsoftn[c2]*D3[c2])+0.5*(D2[c2]+D4[c2])
# Dhard[c3]= 0.5*(D1[c3]+D3[c3])+Ghardo[c3]*(D2[c3]+Rhardo[c3]*D4[c3])
# Dhard[c4]= Ghardn[c4]*(D1[c4]+Rhardn[c4]*D3[c4])+0.5*(D2[c4]+D4[c4])
#--------------------------------------
    # handling of the cases where Go (or Gn) = -1
#--------------------------------------
# if (abs(Gsoftn+1.0) < 1e-6):
# DTsoft = 0.5*(D1+D3)+Gsofto*(D2+Rsofto*D4)
#
# if (abs(Gsofto+1.0)<1e-6):
# DTsoft = Gsoftn*(D1+Rsoftn*D3)+0.5*(D2+D4)
#
# if (abs(Ghardn+1.0) < 1.0e-6):
# DThard = 0.5*(D1+D3)+Ghardo*(D2+Rhardo*D4)
#
# if (abs(Ghardo+1.0)<1e-6):
# DThard = Ghardn*(D1+Rhardn*D3)+0.5*(D2+D4)
#
##--------------------------------------
    ## case n < 1 : interior wedge
##--------------------------------------
# else:
#
# thoz = N*np.pi-tho
# thnz = N*np.pi-thn
#
#
# [Rsoftnz,Rhardnz] = R(thnz,k,ero,erro,condo,uro,deltaho)
# [Rsoftoz,Rhardoz] = R(thoz,k,ern,errn,condn,urn,deltahn)
#
# DTsoft = Rsoftoz*Rsoftnz*D1+Rsoftn*D3+(Rsofto*Rsoftn*D2+Rsofto*D4)
#
# DThard = Rhardoz*Rhardnz*D1+Rhardn*D3+(Rhardo*Rhardn*D2+Rhardo*D4)
return Dsoft,Dhard,D1,D2,D3,D4
def G(N,phi0,Ro,Rn):
""" grazing angle correction
Parameters
----------
N : wedge parameter
phi0 : incidence angle (rad)
Ro : R coefficient on face o
Rn : R coefficient on face n
Luebbers 89 "a heuristique UTD slope diffraction coefficient for
rough lossy wedges"
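    Examples
    --------
    A minimal sketch with hypothetical reflection coefficients:
    >>> import numpy as np
    >>> Ro = -0.9*np.ones((1, 1, 1))
    >>> Rn = -0.9*np.ones((1, 1, 1))
    >>> Go, Gn = G(np.array([1.5]), np.array([0.1]), Ro, Rn)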
"""
if not isinstance(phi0,np.ndarray):
phi0 = np.array([phi0])
if not isinstance(N,np.ndarray):
N = np.array([N])
PHI0 = phi0 * np.ones(Ro.shape)
BN = N * np.ones(Ro.shape)
# face o
Go = np.ones(np.shape(Ro))
c1 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)>1.0e-6)
c2 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)<1.0e-6)
c3 = abs(PHI0-BN*np.pi) < 1.0e-6
Go[c1] = 1.0/(1.0+Ro[c1])
Go[c2] = -1.
Go[c3] = 0.5
# face n
Gn = np.ones(np.shape(Rn))
c1 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)>1.0e-6)
c2 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)<1.0e-6)
c3 = abs(PHI0) < 1.0e-6
Gn[c1] = 1.0/(1.0+Rn[c1])
Gn[c2] = -1.
Gn[c3] = 0.5
return Go,Gn
def Dfunc(sign,k,N,dphi,si,sd,beta=np.pi/2):
"""
Parameters
----------
sign : int
+1 | -1
k : wave number
N : wedge parameter
dphi : phi-phi0 or phi+phi0
si : distance source-D
sd : distance D-observation
beta : skew incidence angle
Reference
---------
[1] KOUYOUMJIAN-PATHAK a uniform geometrical theory of diffraction for an edge
in a perfectly conducting surface" IEEE AP nov 74 vol 62 N11
Notes
-----
                  e^(-j*pi/4)                   1
    Di = ------------------------- * ----------------------- * F(kLa)    ([1] eq 25)
            2*n*sqrt(2*pi*k)           tan(dphi/n)*sin(beta)
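    Examples
    --------
    A minimal sketch with hypothetical values (not taken from the reference):
    >>> import numpy as np
    >>> k = np.array([2*np.pi/0.3])
    >>> Di = Dfunc(1.0, k, np.array([1.5]), np.array([np.pi/4]), np.array([10.]), np.array([10.]))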
"""
cste = (1.0-1.0*1j)*(1.0/(4.0*N*np.sqrt(k*np.pi)*np.sin(beta)))
rnn = (dphi+np.pi*sign)/(2.0*N*np.pi)
nn = np.zeros(np.shape(rnn))
nn[rnn>0.5] = 1
nn[rnn>1.5] = 2
nn[rnn<-0.5] = -1
nn[rnn<-1.5] = -2
# KLA ref[1] eq 27
L = (si*sd)/(1.*(si+sd))
AC = np.cos( (2.0*N*nn*np.pi-dphi) / 2.0 )
A = 2*AC**2
KLA = k * L * A
epsi = AC*2.0
angle = (np.pi+sign*dphi)/(2.0*N)
tan = np.tan(angle)
Di = np.empty(KLA.shape)
Fkla,ys,yL = FreF(KLA)
# 4.56 Mac Namara
Di = -cste*Fkla/tan
c5 = np.where(np.abs(tan)<1e-9)
BL = np.ones(Di.shape)*L
Di[c5] = 0.5*np.sqrt(BL[c5])
return(Di)
def FresnelI(x) :
""" calculates Fresnel integral
Parameters
----------
x : array
real argument
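    Examples
    --------
    A short sketch (argument values are arbitrary):
    >>> import numpy as np
    >>> y = FresnelI(np.linspace(0.01, 10., 50))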
"""
v = np.zeros(x.shape,dtype=complex)
y = np.abs(x)
z = .25*y
u1 = np.where(z>1)
u2 = np.where(z<=1)
y1 = y[u1]
y2 = y[u2]
d1 = np.cos(y1)
d2 = np.cos(y2)
e1 = np.sin(y1)
e2 = np.sin(y2)
z1 = z[u1]
z2 = z[u2]
c1 = np.sqrt(z1)
c2 = np.sqrt(z2)
# ----------------------------------------
# x>4, z>1
# ----------------------------------------
v1 = 0.5 - 0.5*1j
c1 = (1.0)/c1
z1 = c1*c1
a1=((((((((((
.23393900e-3*z1 -.12179300e-2)*z1 +.21029670e-2)*z1
+.2464200e-3)*z1 -.67488730e-2)*z1 +.11948809e-1)*z1
-.9497136e-2)*z1 +.68989200e-3)*z1 +.57709560e-2)*z1
+.3936000e-5)*z1 -.24933975e-1)*z1*c1
b1=(((((((((((
.838386000e-3*z1 -.55985150e-2)*z1 +.16497308e-1)*z1
-.27928955e-1)*z1 +.29064067e-1)*z1 -.17122914e-1)*z1
+.19032180e-2)*z1 +.48514660e-2)*z1 +.23006000e-4)*z1
-.93513410e-2)*z1 +.23000000e-7)*z1 +.19947114000)*c1
# ----------------------------------------
# x<4, z<1
# ----------------------------------------
a2=(((((((((((
0.34404779e-1 *z2 - 0.15023096)*z2 - 0.25639041e-1)*z2
+0.850663781 )*z2 - 0.75752419e-1 )*z2 - 0.305048566e1)*z2
-0.16898657e-1 )*z2 + 0.6920691902e1)*z2 - 0.576361e-3 )*z2
-0.6808568854e1)*z2 - 0.1702e-5)*z2 + 0.159576914e1)*c2
b2=(((((((((((
.19547031e-1 *z2 -.216195929e0 )*z2 +.702222016e0)*z2
-.4033492760e0)*z2 -.1363729124e1)*z2 -.138341947e0)*z2
+.5075161298e1)*z2 -.952089500e-2)*z2 -.778002040e1)*z2
-.928100000e-4)*z2 +.4255387524e1)*z2 -.33000000e-7)*c2
w1 = a1*d1+b1*e1+ 1j*(b1*d1-a1*e1) + v1
w2 = a2*d2+b2*e2+ 1j*(b2*d2-a2*e2)
v[u1] = w1
v[u2] = w2
y = v*(np.sqrt(np.pi/2.0))
return y
def FreF(x) :
""" F function from Pathack
Parameters
----------
x : array
real argument
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> x = np.logspace(-4,2,400);
>>> F = FreF(x)
>>> plt.semilogx(x,np.abs(F))
>>> plt.grid()
"""
ejp4 = np.exp(1j*np.pi/4)
emjp4 = np.exp(-1j*np.pi/4)
y = np.zeros(x.shape,dtype=complex)
u1 = np.where(x>10)[0]
u2 = np.where(x<=10)[0]
xu1 = x[u1]
xu2 = x[u2]
x2 = xu1*xu1
x3 = x2*xu1
x4 = x3*xu1
w1 = 1-0.75/x2+4.6875/x4 + 1j*( 0.5/xu1 -1.875/x3)
cst = (1.0 - 1j )*0.5*np.sqrt(np.pi/2)
carx = abs(xu2)
racx = np.sqrt(carx)
modx = np.mod(xu2,2*np.pi)
expjx = np.exp(1j*modx)
fr = FresnelI(carx)
into = cst - fr
w2 = 2.0*racx*1j*expjx*into
y[u1] = w1
y[u2] = w2
# [1] eq 30
ys = (np.sqrt(np.pi*x)-2*x*ejp4-(2/3.)*x**2*emjp4)*np.exp(1j*(np.pi/4+x))
yl = 1-0.75/(x*x)+4.6875/(x*x*x*x) + 1j*( 0.5/x -1.875/(x*x*x))
return y,ys,yl
def FreF2(x):
""" F function using numpy fresnel function
Parameters
----------
Not working for large argument
"""
y = np.zeros(x.shape,dtype=complex)
u1 = np.where(x>5)[0]
u2 = np.where(x<=5)[0]
xu1 = x[u1]
xu2 = x[u2]
x2 = xu1*xu1
x3 = x2*xu1
x4 = x3*xu1
w1 = 1-0.75/x2+4.6875/x4 + 1j*( 0.5/xu1 -1.875/x3)
cst = np.sqrt(np.pi/2.)
sF,cF = sps.fresnel(np.sqrt(xu2/cst))
Fc = (0.5-cF)*cst
Fs = (0.5-sF)*cst
modx = np.mod(xu2,2*np.pi)
expjx = np.exp(1j*modx)
w2 = 2*1j*np.sqrt(xu2)*expjx*(Fc-1j*Fs)
y[u1] = w1
y[u2] = w2
return(y)
def R(th,k,er,err,sigma,ur,urr,deltah):
""" R coeff
Parameters
----------
th : np.array
incidence angle (axe 0)
k : np.array
wave number (axe 1)
er : real part of permittivity
err : imaginary part of permittivity
sigma : conductivity
ur : real part of permeability
urr : imaginary part of permeability
deltah : height standard deviation
Examples
--------
>>> import numpy as np
>>> th = np.linspace(0,np.pi/2,180)[None,:]
>>> fGHz = 0.3
>>> lamda = 0.3/fGHz
>>> k = np.array([2*np.pi/2])[:,None]
>>> Rs,Rh = R(th,k,9,0,0.01,1,0,0)
"""
cel = 299792458
#--------------------------------------------
    # dielectric surface case (otherwise er = -1)
#--------------------------------------------
if (er >= 0.0 ):
if ( (( ur-1.0)<1e-16) & ((er-1.0)<1e-16) ):
Rs = np.zeros(len(th))
Rh = np.zeros(len(th))
u1 = np.where(th >= 1.5*np.pi)
u2 = np.where(th >= np.pi )
u3 = np.where(th >= 0.5*np.pi)
th[u1] = 2.0*np.pi - th[u1]
th[u2] = th[u2] - np.pi
th[u3] = np.pi - th[u3]
#if (th >= 1.5*np.pi ):
# th = 2.0*np.pi - th
#elif (th >= np.pi ):
# th = th - np.pi
#elif (th >= 0.5*np.pi):
# th = np.pi - th
uo = 4.0*np.pi*1e-7
eo = 1.0/(uo*cel*cel)
pulse = k*cel
permi = (er-1j*err)-(1j*sigma)/(pulse*eo)
perme = ur - 1j*urr
yy = (permi/perme)
st = np.sin(th)
ct = np.cos(th)
bb = np.sqrt(yy-ct**2)
Rs = (st - bb) / (st + bb )
Rh = (yy*st-bb)/(yy*st+bb)
    else: # metallic case
Rs = -np.ones(th.shape)
Rh = np.ones(th.shape)
roughness = 1.0
Rs = Rs* roughness
Rh = Rh* roughness
return Rs,Rh
| dialounke/pylayers | pylayers/antprop/diff.py | Python | mit | 13,499 | 0.029632 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Add data_migration table
Revision ID: 2e171e6198e6
Revises: 15d3fad78656
Create Date: 2016-08-03 11:11:55.680872
"""
# revision identifiers, used by Alembic.
revision = '2e171e6198e6'
down_revision = '15d3fad78656'
from alembic import op
from sqlalchemy import Column, Integer, Unicode, DateTime
def upgrade():
op.create_table('data_migration',
Column('id', Integer, primary_key=True),
Column('name', Unicode(255), nullable=False, unique=True),
Column('finish_time', DateTime),
mysql_engine='InnoDB')
def downgrade():
op.drop_table('data_migration')
| jtoppins/beaker | Server/bkr/server/alembic/versions/2e171e6198e6_add_data_migration_table.py | Python | gpl-2.0 | 863 | 0.005794 |
# -*- coding: utf-8 -*-
import logging
import chwrapper
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class CompaniesHouseClient():
def __init__(self):
self._ch = chwrapper.Search()
def get_company_data(self, k, v):
"""Search companies house for the data"""
try:
pc = v['postal_code']
except AttributeError:
logger.warn("No postal code found for {}".format(k))
return []
r = self._ch.search_companies(k, items_per_page=200)
items = r.json()['items']
data = []
for item in items:
try:
if item['address'].get('postal_code') == pc:
data.append(item)
except AttributeError:
logger.info("No address item for {}")
return data
def get_directors(self, k, v):
"""Return directors data for a company number."""
try:
company_number = v.get('company_data')[0].get('company_number')
except IndexError as e:
logger.warn("No Company data found.", e)
return []
if not company_number:
logger.warn("No postal code found for {}".format(k))
return []
r = self._ch.officers(company_number)
items = r.json()['items']
data = []
for item in items:
data.append(item)
return data
ch_client = CompaniesHouseClient()
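# Illustrative usage sketch (company name and postcode are made up; a valid
# Companies House API key is assumed to be configured for chwrapper):
#
#     matches = ch_client.get_company_data("ACME TRADING LTD", {"postal_code": "AB1 2CD"})
#     officers = ch_client.get_directors("ACME TRADING LTD", {"company_data": matches})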
| nestauk/inet | inet/sources/companies_house.py | Python | mit | 1,473 | 0 |
from django.conf.urls import patterns, url
from .views import PlaylistList, PlaylistDetail, SongList, SongUpdate
urlpatterns = patterns('songs.views',
url(r'^playlists$', PlaylistList.as_view(), name='playlists_list'),
url(r'^playlists/(?P<playlist_pk>[0-9]+)/$', PlaylistDetail.as_view(),
name="playlist_detail"),
url(r'^songs$', SongList.as_view(), name='songs_list'),
url(r'^songs/(?P<song_pk>[0-9]+)/$', SongUpdate.as_view(),
name='songs_update'),
) | kburts/django-playlist | django_playlist/songs/urls.py | Python | mit | 465 | 0.021505 |
# -*- coding: utf-8 -*-
#This is generated code - do not edit
encoding = 'utf-8'
dict = {
'&About...': '&\xd8\xb9\xd9\x86...',
'&Delete Window': '&\xd8\xa7\xd8\xad\xd8\xb0\xd9\x81 \xd8\xa7\xd9\x84\xd9\x86\xd8\xa7\xd9\x81\xd8\xb0\xd8\xa9',
'&Describe Action': '&\xd8\xa3\xd9\x88\xd8\xb5\xd9\x81 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x85\xd9\x84\xd9\x8a\xd8\xa9',
'&Execute Action': '&\xd9\x86\xd9\x81\xd8\xb0 \xd8\xa7\xd9\x84\xd8\xb9\xd9\x85\xd9\x84\xd9\x8a\xd8\xa9',
'&Folding': '&\xd8\xa7\xd9\x84\xd8\xb7\xd9\x8a',
'&Help': '&\xd9\x85\xd8\xb3\xd8\xa7\xd8\xb9\xd8\xaf\xd8\xa9',
'&Line Numbers': '&\xd8\xb9\xd8\xaf\xd8\xaf \xd8\xa7\xd9\x84\xd8\xb3\xd8\xb7\xd9\x88\xd8\xb1',
'&New Window': '&\xd9\x86\xd8\xa7\xd9\x81\xd8\xb0\xd8\xa9 \xd8\xac\xd8\xaf\xd9\x8a\xd8\xaf\xd8\xa9',
'&Preferences...': '&\xd8\xa7\xd9\x84\xd8\xaa\xd9\x81\xd8\xb6\xd9\x8a\xd9\x84\xd8\xa7\xd8\xaa...',
'&Revert': '&\xd8\xa5\xd8\xb3\xd8\xaa\xd8\xb1\xd8\xac\xd8\xb9',
'&Save...': '&\xd8\xad\xd9\x81\xd8\xb8...',
'&Show Toolbars': '&\xd8\xb9\xd8\xb1\xd8\xb6 \xd8\xb4\xd8\xb1\xd9\x8a\xd8\xb7 \xd8\xa7\xd9\x84\xd8\xa3\xd8\xaf\xd9\x88\xd8\xa7\xd8\xa9',
'&Word Count': '&\xd8\xb9\xd8\xaf \xd8\xa7\xd9\x84\xd9\x83\xd9\x84\xd9\x85\xd8\xa7\xd8\xaa',
'About this program': '\xd8\xad\xd9\x88\xd9\x92\xd9\x84 \xd9\x87\xd8\xb0\xd8\xa7 \xd8\xa7\xd9\x84\xd8\xa8\xd8\xb1\xd9\x86\xd8\xa7\xd9\x85\xd8\xac',
'Actions': '\xd8\xa5\xd8\xac\xd8\xb1\xd8\xa7\xd8\xa1\xd8\xa7\xd8\xaa',
'Attributes': '\xd8\xa7\xd9\x84\xd8\xb5\xd9\x91\xd9\x81\xd8\xa7\xd8\xaa',
'Background': '\xd8\xa7\xd9\x84\xd8\xae\xd9\x84\xd9\x81\xd9\x8a\xd9\x91\xd8\xa9',
'Cancel': '\xd8\xa5\xd9\x84\xd8\xba\xd8\xa7\xef\xba\x80',
'Case': '\xd8\xa7\xd9\x84\xd8\xad\xd8\xa7\xd9\x84\xd8\xa9',
'Clear Playlist': '\xd9\x85\xd8\xb3\xd8\xad \xd9\x82\xd8\xa7\xd8\xa6\xd9\x85\xd8\xa9 \xd8\xa7\xd9\x84\xd8\xaa\xd8\xb4\xd8\xba\xd9\x8a\xd9\x84',
'Close Tab': '\xd8\xa3\xd8\xba\xd9\x84\xd9\x82 \xd8\xa7\xd9\x84\xd9\x84\xd8\xb3\xd8\xa7\xd9\x86',
'Close the current tab': '\xd8\xa3\xd8\xba\xd9\x84\xd9\x82 \xd8\xa7\xd9\x84\xd9\x84\xd8\xb3\xd8\xa7\xd9\x86 \xd8\xa7\xd9\x84\xd8\xad\xd8\xa7\xd9\x84\xd9\x8a',
'Color': '\xd8\xa7\xd9\x84\xd9\x84\xd9\x88\xd9\x86',
'Contrast': '\xd8\xa7\xd9\x84\xd8\xaa\xd8\xa8\xd8\xa7\xd9\x8a\xd9\x86',
'Copy': '\xd9\x86\xd8\xb3\xd8\xae',
'Cut': '\xd9\x82\xd8\xb5',
'Debug': '\xd8\xaa\xd9\x86\xd9\x82\xd9\x8a\xd8\xad',
'Documents': '\xd8\xa7\xd9\x84\xd9\x85\xd8\xb3\xd8\xaa\xd9\x86\xd8\xaf\xd8\xa7\xd8\xaa',
'E&xit': '&\xd8\xae\xd8\xb1\xd9\x88\xd8\xac',
}
| robmcmullen/peppy | peppy/i18n/ar.py | Python | gpl-2.0 | 2,474 | 0.017785 |
from ndlib.models.compartments.Compartment import Compartiment
import networkx as nx
import numpy as np
import operator
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class NodeNumericalAttribute(Compartiment):
def __init__(self, attribute, value=None, op=None, probability=1, **kwargs):
super(self.__class__, self).__init__(kwargs)
self.__available_operators = {"==": operator.__eq__, "<": operator.__lt__,
">": operator.__gt__, "<=": operator.__le__,
">=": operator.__ge__, "!=": operator.__ne__,
"IN": (operator.__ge__, operator.__le__)}
self.attribute = attribute
self.attribute_range = value
self.probability = probability
self.operator = op
if self.attribute_range is None:
raise ValueError("A valid attribute value must be provided")
if self.operator is not None and self.operator in self.__available_operators:
if self.operator == "IN":
if not isinstance(self.attribute_range, list) or self.attribute_range[1] < self.attribute_range[0]:
raise ValueError("A range list is required to test IN condition")
else:
if not isinstance(self.attribute_range, int):
if not isinstance(self.attribute_range, float):
raise ValueError("A numeric value is required to test the selected condition")
else:
raise ValueError("The operator provided '%s' is not valid" % operator)
def execute(self, node, graph, status, status_map, *args, **kwargs):
val = nx.get_node_attributes(graph, self.attribute)[node]
p = np.random.random_sample()
if self.operator == "IN":
condition = self.__available_operators[self.operator][0](val, self.attribute_range[0]) and \
self.__available_operators[self.operator][1](val, self.attribute_range[1])
else:
condition = self.__available_operators[self.operator](val, self.attribute_range)
test = condition and p <= self.probability
if test:
return self.compose(node, graph, status, status_map, kwargs)
return False
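# Illustrative usage sketch (the attribute name, range and probability are
# hypothetical; see the ndlib documentation for the full CompositeModel setup):
#
#     # fire the rule for nodes whose "age" lies in [18, 30], with probability 0.5
#     c = NodeNumericalAttribute("age", value=[18, 30], op="IN", probability=0.5)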
| GiulioRossetti/ndlib | ndlib/models/compartments/NodeNumericalAttribute.py | Python | bsd-2-clause | 2,351 | 0.005104 |
# PyAnimation - Animation, in a terminal.
# Copyright (C) 2015 Nathaniel Olsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import os
EngineVersion = "0.3-indev"
def clear():
os.system('clear')
# The Legacy wait for legacy or outdated programs.
def legacywait():
time.sleep(0.4)
def legacywait2():
time.sleep(0.2)
def legacywait3():
time.sleep(0.1)
# The new wait.
def waitpoint1():
time.sleep(0.1)
def waitpoint2():
time.sleep(0.2)
def waitpoint3():
time.sleep(0.3)
def waitpoint4():
time.sleep(0.4)
def waitpoint5():
time.sleep(0.5)
def waitpoint6():
time.sleep(0.6)
def wait1():
time.sleep(1)
def wait2():
time.sleep(2)
def wait3():
time.sleep(3)
def loading_screen():
while True:
print("L")
waitpoint1()
clear()
print("Lo")
waitpoint1()
clear()
print("Loa")
waitpoint1()
clear()
print("Load")
waitpoint1()
clear()
print("Loadi")
waitpoint1()
clear()
print("Loadi")
waitpoint1()
clear()
print("Loadin")
waitpoint1()
clear()
print("Loading")
waitpoint1()
clear()
print("Loadin")
waitpoint1()
clear()
print("Loadi")
waitpoint1()
clear()
print("Load")
waitpoint1()
clear()
print("Loa")
waitpoint1()
clear()
print("Lo")
waitpoint1()
clear()
print("L")
waitpoint1()
clear()
| UltimateNate/TURPG | PyAnimationEngine.py | Python | gpl-3.0 | 2,064 | 0.009205 |
import my_data_file
d = my_data_file.my_data
print "Hello my name is %s and i am %d years of age and my coolnes is %d " % (d [ 'naam' ], d [ 'age' ], d ['coolheid']) | ArtezGDA/text-IO | Martijn/format.py | Python | mit | 170 | 0.058824 |
# Copyright © 2013, 2015, 2016, 2017, 2018, 2020, 2022 Tom Most <twm@freecog.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
"""
Yarrharr production server via Twisted Web
"""
import io
import json
import logging
import os
import re
import sys
from base64 import b64encode
import attr
from django.conf import settings
from django.dispatch import receiver
from twisted.internet import defer
from twisted.internet.endpoints import serverFromString
from twisted.logger import (
FileLogObserver,
FilteringLogObserver,
ILogFilterPredicate,
Logger,
LogLevel,
PredicateResult,
formatEvent,
globalLogBeginner,
globalLogPublisher,
)
from twisted.python.filepath import FilePath
from twisted.web.resource import ErrorPage, NoResource, Resource
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.wsgi import WSGIResource
from zope.interface import implementer
from . import __version__
from .signals import schedule_changed
from .wsgi import application
log = Logger()
@attr.s
class CSPReport(object):
url = attr.ib()
referrer = attr.ib()
resource = attr.ib()
violatedDirective = attr.ib()
effectiveDirective = attr.ib()
source = attr.ib()
sample = attr.ib()
status = attr.ib()
policy = attr.ib()
disposition = attr.ib()
def __str__(self):
bits = []
for a in attr.fields(self.__class__):
value = getattr(self, a.name)
if value is None:
continue
bits.append("{}={!r}".format(a.name, value))
return "\n".join(bits)
@classmethod
def fromJSON(cls, data):
"""
Construct a :class:`CSPReport` from the serialization of a violation
per CSP Level 3 §5.3.
"""
if {"source-file", "line-number", "column-number"} <= data.keys():
source = "{source-file} {line-number}:{column-number}".format_map(data)
elif {"source-file", "line-number"} <= data.keys():
source = "{source-file} {line-number}".format_map(data)
else:
source = data.get("source-file")
return cls(
url=data["document-uri"],
referrer=data["referrer"] or None, # Always seems to be an empty string.
resource=data["blocked-uri"],
violatedDirective=data.get("violated-directive"),
effectiveDirective=data.get("effective-directive"),
policy=data["original-policy"],
disposition=data.get("disposition"),
status=data.get("status-code"),
sample=data.get("script-sample") or None,
source=source,
)
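# Illustrative only: a minimal report body as a browser might POST it (field
# values are made up), parsed with CSPReport.fromJSON:
#
#     report = CSPReport.fromJSON({
#         "document-uri": "https://reader.example/feeds",
#         "referrer": "",
#         "blocked-uri": "https://evil.example/x.js",
#         "effective-directive": "script-src",
#         "original-policy": "script-src 'self'",
#     })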
class CSPReportLogger(Resource):
isLeaf = True
_log = Logger()
def render(self, request):
if request.method != b"POST":
request.setResponseCode(405)
request.setHeader("Allow", "POST")
return b"HTTP 405: Method Not Allowed\n"
if request.requestHeaders.getRawHeaders("Content-Type") != ["application/csp-report"]:
request.setResponseCode(415)
return b"HTTP 415: Only application/csp-report requests are accepted\n"
# Process the JSON text produced per
# https://w3c.github.io/webappsec-csp/#deprecated-serialize-violation
report = CSPReport.fromJSON(json.load(io.TextIOWrapper(request.content, encoding="utf-8"))["csp-report"])
if report.sample and report.sample.startswith(";(function installGlobalHook(window) {"):
# This seems to be a misbehavior in some Firefox extension.
# I cannot reproduce it with a clean profile.
return b""
if report.sample and report.sample == "call to eval() or related function blocked by CSP":
# This is caused by Tridactyl due to a Firefox issue. It's quite
# chatty so we'll disable for now, even though the message is
# generated by the browser and might indicate a script injection.
# See <https://github.com/cmcaine/tridactyl/issues/109> and
# <https://bugzilla.mozilla.org/show_bug.cgi?id=1267027>.
return b""
self._log.debug(
"Content Security Policy violation reported by {userAgent!r}:\n{report}",
userAgent=", ".join(request.requestHeaders.getRawHeaders("User-Agent", [])),
report=report,
)
return b"" # Browser ignores the response.
class FallbackResource(Resource):
"""
Resource which falls back to an alternative resource tree if it doesn't
have a matching child resource.
"""
def __init__(self, fallback):
Resource.__init__(self)
self.fallback = fallback
def render(self, request):
"""
Render this path with the fallback resource.
"""
return self.fallback.render(request)
def getChild(self, path, request):
"""
Dispatch unhandled requests to the fallback resource.
"""
# Mutate the request path such that it's like FallbackResource didn't handle
# the request at all. This is a bit of a nasty hack, since we're
# relying on the t.w.server implementation's behavior to not break when
# we do this. A better way would be to create a wrapper for the request object
request.postpath.insert(0, request.prepath.pop())
return self.fallback
class Static(Resource):
"""
    Serve up Yarrharr's static assets directory. In development, the files are
    served uncompressed and named like so::
main-afffb00fd22ca3ce0250.js
The second dot-delimited section is a hash of the file's contents or source
material. As the filename changes each time the content does, these files
are served with a long max-age and the ``immutable`` flag in the
`Cache-Control`_ header.
In production, each file has two pre-compressed variants: one with
a ``.gz`` extension, and one with a ``.br`` extension. For example::
main-afffb00fd22ca3ce0250.js
main-afffb00fd22ca3ce0250.js.br
main-afffb00fd22ca3ce0250.js.gz
The actual serving of the files is done by `twisted.web.static.File`, which
is fancy and supports range requests, conditional gets, etc.
.. note::
Several features used here are only available to HTTPS origins.
Cache-Control: immutable and Brotli compression both are in Firefox.
.. _cache-control: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
"""
_dir = FilePath(settings.STATIC_ROOT)
_validName = re.compile(rb"^[a-zA-Z0-9]+-[a-zA-Z0-9]+(\.[a-z]+)+$")
# NOTE: RFC 7231 § 5.3.4 is not completely clear about whether
# content-coding tokens are case-sensitive or not. The "identity" token
# appears in EBNF and is therefore definitely case-insensitive, but the
# other tokens only appear in IANA registry tables in lowercase form. In
# contrast, the transfer-coding possibilities are clearly defined in EBNF
# so are definitely case-insensitive. For content-coding every implementer
# seems to agree on lowercase, so I'm not going to worry about it.
_brToken = re.compile(rb"(:?^|[\s,])br(:?$|[\s,;])")
_gzToken = re.compile(rb"(:?^|[\s,])(:?x-)?gzip(:?$|[\s,;])")
_contentTypes = {
b".js": "application/javascript",
b".css": "text/css",
b".map": "application/octet-stream",
b".ico": "image/x-icon",
b".svg": "image/svg+xml",
b".png": "image/png",
}
def _file(self, path, type, encoding=None):
"""
Construct a `twisted.web.static.File` customized to serve Yarrharr
static assets.
:param path: `twisted.internet.filepath.FilePath` instance
:returns: `twisted.web.resource.IResource`
"""
f = File(path.path)
f.type = type
f.encoding = encoding
return f
def getChild(self, path, request):
"""
Serve a file for the given path.
The Content-Type header is set based on the file extension.
A limited form of content negotiation is done based on the
Accept-Encoding header and the files on disk. Apart from the default of
``identity``, two encodings are supported:
* ``br``, which selects any Brotli-compressed ``.br`` variant of
the file.
* ``gzip``, which selects any gzip-compressed ``.br`` variant of the
file. ``x-gzip`` is also supported.
qvalues are ignored as browsers don't use them. This may produce an
incorrect response if a variant is disabled like ``identity;q=0``.
"""
if not self._validName.match(path):
return NoResource("Not found.")
ext = path[path.rindex(b".") :]
try:
type = self._contentTypes[ext]
except KeyError:
return NoResource("Unknown type.")
acceptEncoding = request.getHeader(b"accept-encoding") or b"*"
file = None
if self._brToken.search(acceptEncoding):
br = self._dir.child(path + b".br")
if br.isfile():
file = self._file(br, type, "br")
if file is None and self._gzToken.search(acceptEncoding):
gz = self._dir.child(path + b".gz")
if gz.isfile():
file = self._file(gz, type, "gzip")
if file is None:
file = self._file(self._dir.child(path), type)
request.setHeader(b"Vary", b"accept-encoding")
request.setHeader(b"Cache-Control", b"public, max-age=31536000, immutable")
return file
class Root(FallbackResource):
"""
Root of the Yarrharr URL hierarchy.
"""
def __init__(self, reactor, threadpool):
wsgi = WSGIResource(reactor, threadpool, application)
FallbackResource.__init__(self, wsgi)
self.putChild(b"csp-report", CSPReportLogger())
self.putChild(b"static", Static())
# Handle requests for /favicon.ico and paths hit by script kiddies at
# the Twisted level so that they don't make it down to Django, which
# logs 404s as errors:
a404 = ErrorPage(404, "Not Found", "")
for path in (b"favicon.ico", b"index.php", b"wp-login.php"):
self.putChild(path, a404)
def getChildWithDefault(self, name, request):
# Disable the Referer header in some browsers. This is complemented by
# the injection of rel="noopener noreferrer" on all links by the HTML
# sanitizer.
request.setHeader(b"Referrer-Policy", b"same-origin")
request.setHeader(b"X-Content-Type-Options", b"nosniff")
request.setHeader(b"Cross-Origin-Opener-Policy", b"same-origin")
script_nonce = b64encode(os.urandom(32))
request.requestHeaders.setRawHeaders(b"Yarrharr-Script-Nonce", [script_nonce])
request.setHeader(
b"Content-Security-Policy",
(
# b"default-src 'none'; "
b"img-src *; "
b"script-src 'self' 'nonce-%s'; "
b"style-src 'self'; "
b"frame-ancestors 'none'; "
b"form-action 'self'; "
b"report-uri /csp-report"
)
% (script_nonce,),
)
return super().getChildWithDefault(name, request)
def updateFeeds(reactor, max_fetch=5):
"""
Poll any feeds due for a check.
"""
from .fetch import poll
def _failed(reason):
"""
Log unexpected errors and schedule a retry in one second.
"""
log.failure("Unexpected failure polling feeds", failure=reason)
return 1.0 # seconds until next poll
d = poll(reactor, max_fetch)
# Last gasp error handler to avoid terminating the LoopingCall.
d.addErrback(_failed)
return d
_txLevelToPriority = {
LogLevel.debug: "<7>",
LogLevel.info: "<6>",
LogLevel.warn: "<4>",
LogLevel.error: "<3>",
LogLevel.critical: "<2>",
}
def formatForSystemd(event):
# Events generated by twisted.python.log have a "system", while ones
# generated with twisted.logger have a "namespace" with similar
# meaning.
#
s = "[{}] ".format(event.get("log_system") or event.get("log_namespace") or "-")
s += formatEvent(event)
if not s:
return None
if "log_failure" in event:
try:
s += "\n" + event["log_failure"].getTraceback().rstrip("\n")
except: # noqa
pass
prefix = _txLevelToPriority.get(event.get("log_level")) or "<6>"
return prefix + s.replace("\n", "\n" + prefix + " ") + "\n"
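# For example (illustrative event only), an info-level event from the
# "yarrharr.fetch" namespace with the message "polled 3 feeds" is rendered as:
#
#     <6>[yarrharr.fetch] polled 3 feeds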
@implementer(ILogFilterPredicate)
def dropUnhandledHTTP2Shutdown(event):
"""
Suppress the log messages which result from an unhandled error in HTTP/2
connection shutdown. See #282 and Twisted #9462.
This log message is relayed from the :mod:`twisted.python.log` so the
fields are a little odd:
* ``'log_namespace'`` is ``'log_legacy'``, and there is a ``'system'``
field with a value of ``'-'``.
* ``'log_text'`` contains the actual log text, including a pre-formatted
traceback.
* ``'failure'`` used instead of ``'log_failure'``.
"""
if event.get("log_namespace") != "log_legacy":
return PredicateResult.maybe
if event.get("log_level") != LogLevel.critical:
return PredicateResult.maybe
if "failure" not in event or not event["failure"].check(AttributeError):
return PredicateResult.maybe
if event["log_text"].startswith("Unhandled Error") and "no attribute 'shutdown'" in event["log_text"]:
return PredicateResult.no
return PredicateResult.maybe
class TwistedLoggerLogHandler(logging.Handler):
publisher = globalLogPublisher
def _mapLevel(self, levelno):
"""
Convert a stdlib logging level into a Twisted :class:`LogLevel`.
"""
if levelno <= logging.DEBUG:
return LogLevel.debug
elif levelno <= logging.INFO:
return LogLevel.info
elif levelno <= logging.WARNING:
return LogLevel.warn
elif levelno <= logging.ERROR:
return LogLevel.error
return LogLevel.critical
def emit(self, record):
self.publisher(
{
"log_level": self._mapLevel(record.levelno),
"log_namespace": record.name,
"log_format": "{msg}",
"msg": self.format(record),
}
)
class AdaptiveLoopingCall(object):
"""
:class:`AdaptiveLoopingCall` invokes a function periodically. Each time it
is called it returns the time to wait until the next invocation.
:ivar _clock: :class:`IReactorTime` implementer
:ivar _f: The function to call.
:ivar _deferred: Deferred returned by :meth:`.start()`.
:ivar _call: `IDelayedCall` when waiting for the next poll period.
Otherwise `None`.
:ivar bool _poked: `True` when the function should be immediately invoked
again after it completes.
:ivar bool _stopped: `True` once `stop()` has been called.
"""
_deferred = None
_call = None
_poked = False
_stopped = False
def __init__(self, clock, f):
"""
:param clock: :class:`IReactorTime` provider to use when scheduling
calls.
:param f: The function to call when the loop is started. It must return
the number of seconds to wait before calling it again, or
a deferred for the same.
"""
self._clock = clock
self._f = f
def start(self):
"""
Call the function immediately, and schedule future calls according to
its result.
:returns:
:class:`Deferred` which will succeed when :meth:`stop()` is called
and the loop cleanly exits, or fail when the function produces
a failure.
"""
assert self._deferred is None
assert self._call is None
assert not self._stopped
self._deferred = d = defer.Deferred()
self._callIt()
return d
def stop(self):
self._stopped = True
if self._call:
self._call.cancel()
self._deferred.callback(self)
def poke(self):
"""
Run the function as soon as possible: either immediately or once it has
finished any current execution. This is a no-op if the service has been
stopped. Pokes coalesce if received while the function is executing.
"""
if self._stopped or self._poked:
return
if self._call:
self._call.cancel()
self._callIt()
else:
self._poked = True
def _callIt(self):
self._call = None
d = defer.maybeDeferred(self._f)
d.addCallback(self._schedule)
d.addErrback(self._failLoop)
def _schedule(self, seconds):
"""
Schedule the next call.
"""
assert isinstance(seconds, (int, float))
if self._stopped:
d, self._deferred = self._deferred, None
d.callback(self)
elif self._poked:
self._poked = False
self._callIt()
else:
self._call = self._clock.callLater(seconds, self._callIt)
def _failLoop(self, failure):
"""
Terminate the loop due to an unhandled failure.
"""
d, self._deferred = self._deferred, None
d.errback(failure)
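# A minimal usage sketch for AdaptiveLoopingCall (illustrative only; the poll
# function and the 60 second interval are hypothetical):
#
#     def pollOnce():
#         # ... do one unit of work ...
#         return 60.0  # seconds until the next invocation
#
#     loop = AdaptiveLoopingCall(reactor, pollOnce)
#     d = loop.start()   # calls pollOnce immediately, then per its return value
#     loop.poke()        # run again as soon as possible
#     # loop.stop() fires d once the loop exits cleanly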
def run():
from twisted.internet import reactor
root = logging.getLogger()
logging.getLogger("django").setLevel(logging.INFO)
logging.raiseExceptions = settings.DEBUG
logging._srcfile = None # Disable expensive collection of location information.
root.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
root.addHandler(TwistedLoggerLogHandler())
observer = FilteringLogObserver(
FileLogObserver(sys.stdout, formatForSystemd),
[dropUnhandledHTTP2Shutdown],
)
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
log.info("Yarrharr {version} starting", version=__version__)
factory = Site(Root(reactor, reactor.getThreadPool()), logPath=None)
endpoint = serverFromString(reactor, settings.SERVER_ENDPOINT)
reactor.addSystemEventTrigger("before", "startup", endpoint.listen, factory)
updateLoop = AdaptiveLoopingCall(reactor, lambda: updateFeeds(reactor))
loopEndD = updateLoop.start()
loopEndD.addErrback(lambda f: log.failure("Polling loop broke", f))
@receiver(schedule_changed)
def threadPollNow(sender, **kwargs):
"""
When the `schedule_changed` signal is sent poke the polling loop. If it
is sleeping this will cause it to poll immediately. Otherwise this will
cause it to run the poll function immediately once it returns (running
it again protects against races).
"""
log.debug("Immediate poll triggered by {sender}", sender=sender)
reactor.callFromThread(updateLoop.poke)
def stopUpdateLoop():
updateLoop.stop()
return loopEndD
reactor.addSystemEventTrigger("before", "shutdown", stopUpdateLoop)
reactor.run()
| twm/yarrharr | yarrharr/application.py | Python | gpl-3.0 | 20,459 | 0.000978 |
import sqlite3
import sys
"""<Mindpass is a intelligent password manager written in Python3
that checks your mailbox for logins and passwords that you do not remember.>
Copyright (C) <2016> <Cantaluppi Thibaut, Garchery Martial, Domain Alexandre, Boulmane Yassine>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
sys.path.append('../fenetres/')
from functools import partial
from PyQt5 import QtWidgets, QtGui, QtCore
from fenetreGestion import Ui_fenetreGestion
from requetes import *
import numpy as np
import colorsys
bdd = "../../../Traitement_mails/bdd.sq3"
def print_(arg):
"""
Args:
        arg: the value to display
    Returns: None; the value is printed, followed by a line of '-' to space out the output
"""
print(arg)
print("-------------------------------------")
class LineEditWithFocusOut(QtWidgets.QLineEdit):
"""docstring for LineEditWithFocusOut
    Re-implementation of QLineEdit() with a modified focusOut behaviour:
    when focus leaves the field, the identifiant column of the
    sites_reconnus_<nom_table> table is updated.
"""
def __init__(self, nom_table):
super().__init__()
self.nom_table = nom_table
def focusOutEvent(self, arg):
QtWidgets.QLineEdit.focusOutEvent(self, arg)
        # self.id holds the id of the LineEdit, set in afficher_ligne_site()
requete= "UPDATE sites_reconnus_"+self.nom_table+" SET identifiant =? WHERE rowid=?"
bdd_update(requete, (self.text(), self.id +1))
if self.text() == "":
self.setPlaceholderText("Ajouter un pseudo")
class LigneSite(object):
"""docstring for LigneSite"""
def __init__(self, y, site_web, identifiant, mdp, categorie, objet, nom_table):
self.position = y
self.objet = objet
self.nom_site = site_web
self.nom_mdp = mdp
self.nom_cat = categorie
self.nom_table = nom_table
self.ligne = QtWidgets.QHBoxLayout()
self.site_web =QtWidgets.QLabel()
self.site_web.setAlignment(QtCore.Qt.AlignCenter)
self.site_web.setObjectName("site_web")
self.site_web.setText(site_web)
self.ligne.addWidget(self.site_web)
self.identifiant = LineEditWithFocusOut(self.nom_table)
self.identifiant.setAlignment(QtCore.Qt.AlignCenter)
self.identifiant.setObjectName('identifiant')
self.identifiant.id = y
if identifiant is None or identifiant == "":
self.identifiant.setPlaceholderText("Ajouter un pseudo")
else:
self.identifiant.setText(identifiant)
self.ligne.addWidget(self.identifiant)
self.mdp = QtWidgets.QComboBox()
self.mdp.setObjectName("mdp")
        self.afficher_combo_pwd() # populate the combobox items from the database
self.ligne.addWidget(self.mdp)
self.categorie = QtWidgets.QComboBox()
self.categorie.setObjectName("categorie")
        self.afficher_combo_cat() # populate the combobox items from the database
self.ligne.addWidget(self.categorie)
self.ligne.setStretch(0, 2)
self.ligne.setStretch(1, 2)
self.ligne.setStretch(2, 2)
self.ligne.setStretch(3, 2)
self.categorie.currentIndexChanged.connect(self.changement_cat)
self.mdp.currentIndexChanged.connect(self.changement_pwd)
def changement_cat(self, event):
requete ="SELECT categorie FROM sites_reconnus_"+self.nom_table+" WHERE rowid=?"
ancienne_categorie = toliste(bdd_select(requete, (self.position+1,)))[0]
        # Add the site_web under the corresponding category
requete= "UPDATE sites_reconnus_"+self.nom_table+" SET categorie=? WHERE rowid=?"
bdd_update(requete, (self.categorie.currentText(), self.position +1))
print("Catégorie changée en"+ self.categorie.currentText())
for k in range(len(self.objet.cats)):
if(self.objet.cats[k].nom == self.categorie.currentText()):
liste_label_name =[]
for element in self.objet.cats[k].labels:
liste_label_name.append(element.text())
if(self.categorie.currentText() not in liste_label_name):
label = QtWidgets.QLabel()
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
label.setFont(font)
label.setObjectName("sites_lies_cat")
label.setText(self.site_web.text())
self.objet.cats[k].labels.append(label)
self.objet.cats[k].verticalLayout_groupBox.addWidget(label)
break
        # Refresh the groupBox of the previous category
for k in range(len(self.objet.cats)):
if(self.objet.cats[k].nom == ancienne_categorie):
for label in self.objet.cats[k].labels:
label.deleteLater()
self.objet.cats[k].labels = []
requete ="SELECT site_web FROM sites_reconnus_"+self.nom_table+" WHERE categorie=?"
sites_lies= toliste(bdd_select(requete, (ancienne_categorie,)))
self.objet.cats[k].affichage_sites_lies(sites_lies)
        # Update the label whose category has changed
for pwd in self.objet.pwds:
for label in pwd.labels:
if(label.texte == self.nom_site):
pwd.update(label, self.categorie.currentText())
                    # Update the colour of the groupBox_pwd that holds the associated label
pwd.update_color_groupBox()
def changement_pwd(self):
requete ="SELECT mdp FROM sites_reconnus_"+self.nom_table+" WHERE rowid=?"
ancien_mdp = toliste(bdd_select(requete, (self.position+1,)))[0]
        # Add the site_web under the corresponding password
requete= "UPDATE sites_reconnus_"+self.nom_table+" SET mdp=? WHERE rowid=?"
nouveau_mdp = self.mdp.currentText()
bdd_update(requete, (nouveau_mdp , self.position +1))
print("Mdp changée en"+ nouveau_mdp)
for k in range(len(self.objet.pwds)):
if(self.objet.pwds[k].nom == nouveau_mdp):
liste_label_name =[]
for element in self.objet.pwds[k].labels:
liste_label_name.append(element.text())
if(nouveau_mdp not in liste_label_name):
self.objet.pwds[k].label(self.site_web.text())
break
        # Refresh the groupBox of the previous password
for k in range(len(self.objet.pwds)):
if(self.objet.pwds[k].nom == ancien_mdp):
for label in self.objet.pwds[k].labels:
label.deleteLater()
self.objet.pwds[k].labels = []
requete ="SELECT site_web FROM sites_reconnus_"+self.nom_table+" WHERE mdp=?"
sites_lies= toliste(bdd_select(requete, (ancien_mdp,)))
self.objet.pwds[k].affichage_sites_lies(sites_lies)
for pwd in self.objet.pwds:
if(pwd.nom == ancien_mdp):
pwd.update_color_groupBox()
elif(pwd.nom == nouveau_mdp):
pwd.update_color_groupBox()
def update_pwd_combobox(self, complet):
print(self.mdp.maxCount())
def afficher_combo_pwd(self):
requete= "SELECT mdp FROM mdps_"+self.nom_table+""
tab = bdd_select(requete)
result = []
for k in range(len(tab)):
result.append(tab[k][0])
self.mdp.addItem(self.nom_mdp)
for pwd in result:
if pwd and pwd != self.nom_mdp:
self.mdp.addItem(pwd)
if(self.nom_mdp and self.nom_mdp != ""):
self.mdp.addItem("")
def afficher_combo_cat(self):
requete= "SELECT nom_categorie FROM categories_"+self.nom_table
tab = bdd_select(requete)
result = []
for k in range(len(tab)):
result.append(tab[k][0])
self.categorie.addItem(self.nom_cat)
for cat in result:
if cat and cat != self.nom_cat:
self.categorie.addItem(cat)
if(self.nom_cat and self.nom_cat != ""):
self.categorie.addItem("")
class Ligne(object):
"""docstring for ligneCategorie
(objet) est l'objet contenant tous les éléments de la fenetre.
Permet d'accéder à ces éléments et de les modifier.
"""
def __init__(self, position, nom, sites_lies, objet, nom_table):
self.position = position
self.nom = nom
self.sites_lies = sites_lies
self.objet = objet
self.nom_table =nom_table
self.ligne = QtWidgets.QHBoxLayout()
self.pushButton = QtWidgets.QPushButton()
self.pushButton.setMinimumSize(QtCore.QSize(24, 24))
self.groupBox = QtWidgets.QGroupBox()
self.colorHEX = "#757575"
        self.labels = [] # will hold the list of labels (names of the linked sites)
self.groupBox.setGeometry(QtCore.QRect(20, 50, 91, 50))
font = QtGui.QFont()
font.setPointSize(11)
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_groupBox = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_groupBox.setObjectName("verticalLayout_groupBox")
self.ligne.addWidget(self.groupBox)
self.ligne.addWidget(self.pushButton)
self.ligne.setStretch(0, 20)
self.ligne.setStretch(1, 1)
self.affichage_sites_lies(sites_lies)
        # Event binding
self.pushButton.clicked.connect(self.msgbox)
def msgbox(self):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Voulez-vous vraiment supprimer \""+ str(self.nom) + "\" ?")
msg.setIcon(2)
msg.setInformativeText("Les liens établis avec les sites seront perdus.")
msg.setWindowTitle("Confirmer suppression")
msg.addButton(QtWidgets.QPushButton('Oui'), QtWidgets.QMessageBox.YesRole)
msg.addButton(QtWidgets.QPushButton('Non'), QtWidgets.QMessageBox.NoRole)
msg.buttonClicked.connect(self.msgbtn)
        ret = msg.exec_()
def msgbtn(self, buttonClicked):
if(buttonClicked.text() == "Oui"):
self.suppression()
def suppression(self):
self.suppression_bdd()
self.suppression_affichage()
def affichage_sites_lies(self, site_lies):
pass
class Categorie(Ligne):
"""docstring for Categorie"""
def __init__(self, position, nom, sites_lies, objet, nom_table):
        # Run Ligne.__init__() first
super().__init__(position, nom, sites_lies, objet, nom_table)
        # Then add the category-specific attributes/properties
self.ligne.setObjectName("ligne_categorie")
self.groupBox.setObjectName("groupBox_cat")
self.groupBox.setTitle(nom)
self.pushButton.setObjectName("pushButton_cat")
def setColor(self, k, nb_cat):
num_colors=nb_cat
colors=[]
for i in np.arange(0., 360., 360. / num_colors):
hue = i/360.
lightness = (50 + np.random.rand() * 10)/100.
saturation = (55 + np.random.rand() * 10)/100.
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
t= colors
self.colorRGB = (int(t[k][0]*255),int(t[k][1]*255),int(t[k][2]*255))
self.colorHEX ='#%02x%02x%02x' % self.colorRGB
self.groupBox.setStyleSheet("QGroupBox {\n"
"border: 2px solid rgb(" + str(self.colorRGB[0]) + "," + str(self.colorRGB[1]) + "," + str(self.colorRGB[2]) + ");\n"
"}\n"
"QGroupBox:title {\n"
"color: rgb(" + str(self.colorRGB[0]) + "," + str(self.colorRGB[1]) + "," + str(self.colorRGB[2]) + ");\n"
"}\n"
)
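    # Illustrative note (not part of the original code): setColor spreads the
    # category hues evenly over the colour wheel, so with nb_cat = 4 the rows
    # get hues of roughly 0, 90, 180 and 270 degrees, each with slightly
    # randomised lightness/saturation, converted to an "#rrggbb" string.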
def affichage_sites_lies(self, sites_lies):
for site in sites_lies:
label = QtWidgets.QLabel()
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
label.setFont(font)
label.setObjectName("sites_lies_cat")
label.setText(site)
self.labels.append(label)
self.verticalLayout_groupBox.addWidget(label)
def suppression_bdd(self):
requete = "DELETE FROM categories_"+self.nom_table +" WHERE nom_categorie=?"
bdd_delete(requete, (self.nom,))
print("Categorie supprimée: "+ self.nom)
def suppression_affichage(self):
        # remove the category from every site's combobox
for k in range(len(self.objet.sites)):
if self.objet.sites[k].categorie.currentText() == self.nom:
                # if the deleted category was assigned to this site, switch the site to the empty choice ""
if self.objet.sites[k].categorie.findText("") == -1:
                    # add the empty choice "" if it is not there yet
self.objet.sites[k].categorie.addItem("")
self.objet.sites[k].categorie.setCurrentIndex(self.objet.sites[k].categorie.findText(""))
index = self.objet.sites[k].categorie.findText(self.nom)
self.objet.sites[k].categorie.removeItem(index)
        # destroy the layouts inside the scroll area
self.objet.scrollAreaWidgetContents_cat.deleteLater()
        # clear the cached category rows
self.objet.cats = []
        # recreate an empty container widget
self.objet.scrollAreaWidgetContents_cat = QtWidgets.QWidget()
self.objet.scrollAreaWidgetContents_cat.setGeometry(QtCore.QRect(0, 0, 177, 767))
self.objet.scrollAreaWidgetContents_cat.setObjectName("scrollAreaWidgetContents_cat")
self.objet.verticalLayout_3 = QtWidgets.QVBoxLayout(self.objet.scrollAreaWidgetContents_cat)
self.objet.verticalLayout_3.setObjectName("verticalLayout_3")
self.objet.scrollArea_cat.setWidget(self.objet.scrollAreaWidgetContents_cat)
        # re-run the category display method
self.objet.afficher_categories()
self.objet.actualiser_couleur_pwd()
def ajout_combobox(self):
for k in range(len(self.objet.sites)):
self.objet.sites[k].categorie.addItem(self.nom)
class Password(Ligne):
"""docstring for Password"""
def __init__(self, position, nom, sites_lies, objet, nom_table):
super().__init__(position, nom, sites_lies, objet, nom_table)
self.ligne.setObjectName("ligne_pwd")
self.groupBox.setObjectName("groupBox_pwd")
self.groupBox.setTitle(nom)
self.pushButton.setObjectName("pushButton_pwd")
        # Update the colour of the password group box
self.update_color_groupBox()
def update_title(self, titre):
self.groupBox.setTitle(titre)
def affichage_sites_lies(self, sites_lies):
for site in sites_lies:
self.label(site)
def label(self, site):
label = QtWidgets.QLabel()
label.texte = site
font = QtGui.QFont()
font.setPointSize(9)
font.setItalic(True)
label.setFont(font)
label.setObjectName("sites_lies_pwd")
label.colorRGB = self.getColor_label(site)[0]
label.colorHEX = self.getColor_label(site)[1]
texte = self.create_text_label(label.colorHEX, site)
label.setText(texte)
self.labels.append(label)
self.verticalLayout_groupBox.addWidget(label)
def create_text_label(self, couleur, site):
texte = "<font size='5' font-style='' color="+couleur+">•</font> "
for lettre in site:
texte += lettre
return(texte)
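    # Illustrative example (not part of the original code), assuming a category
    # colour of "#1db954":
    #   create_text_label("#1db954", "github.com")
    #   -> "<font size='5' font-style='' color=#1db954>•</font> github.com"
    # i.e. a coloured bullet in Qt rich text followed by the site name, which
    # label() / update() then pass to QLabel.setText().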
def update(self, label, categorie):
couleur ="#fff"
for k in range(len(self.objet.cats)):
if(self.objet.cats[k].nom == categorie):
couleur = self.objet.cats[k].colorHEX
label.colorHEX = couleur
texte= self.create_text_label(couleur, label.texte)
label.setText(texte)
def update_color_groupBox(self):
colorGroupBox = self.colorHEX
if(self.labels != []):
if(self.labels[0].colorHEX != "#fff"):
colorGroupBox = self.labels[0].colorHEX
b = 1
for label in self.labels:
if(label.colorHEX != colorGroupBox):
b=0
if(not b):
colorGroupBox = "#757575"
else:
colorGroupBox = "#757575"
self.groupBox.setStyleSheet("QGroupBox {"
"border-color:"+colorGroupBox+";"
"}")
def getColor_label(self, site):
"""En paramètre le site, retourne un tableau de couleur [RGB, HEX] (associée à la categorie
éventuellement assignées
"""
requete = "SELECT categorie FROM sites_reconnus_"+self.nom_table+" WHERE site_web=?"
categorie = toliste(bdd_select(requete, (site,)))[0]
tab = ["rgb(255,255,255)","#fff"]
for k in range(len(self.objet.cats)):
if(self.objet.cats[k].nom == categorie):
tab[0] = self.objet.cats[k].colorRGB
tab[1] = self.objet.cats[k].colorHEX
return(tab)
def suppression_bdd(self):
requete = "DELETE FROM mdps_"+self.nom_table+" WHERE mdp=?"
bdd_delete(requete, (self.nom,))
print("Pwd supprimée: "+ self.nom)
def suppression_affichage(self):
        # remove the password from every site's combobox
for k in range(len(self.objet.sites)):
if self.objet.sites[k].mdp.currentText() == self.nom:
                # if the deleted password was the one used by this site, switch the site to the empty choice ""
if self.objet.sites[k].mdp.findText("") == -1:
                    # add the empty choice "" if it is not there yet
self.objet.sites[k].mdp.addItem("")
self.objet.sites[k].mdp.setCurrentIndex(self.objet.sites[k].mdp.findText(""))
index = self.objet.sites[k].mdp.findText(self.nom)
self.objet.sites[k].mdp.removeItem(index)
        # destroy the layouts inside the scroll area
self.objet.scrollAreaWidgetContents_pwd.deleteLater()
        # clear the cached password rows
self.objet.pwds = []
        # recreate an empty container widget
self.objet.scrollAreaWidgetContents_pwd = QtWidgets.QWidget()
self.objet.scrollAreaWidgetContents_pwd.setGeometry(QtCore.QRect(0, 0, 177, 767))
self.objet.scrollAreaWidgetContents_pwd.setObjectName("scrollAreaWidgetContents_cat")
self.objet.verticalLayout_2 = QtWidgets.QVBoxLayout(self.objet.scrollAreaWidgetContents_pwd)
self.objet.verticalLayout_2.setObjectName("verticalLayout_3")
self.objet.scrollArea_pwd.setWidget(self.objet.scrollAreaWidgetContents_pwd)
        # re-run the password display method
self.objet.afficher_pwds()
class ClasseGestion(Ui_fenetreGestion):
def __init__(self, fenetre):
self.setupUi(fenetre)
self.ajouter_cat.setPlaceholderText("Ajouter une catégorie")
self.ajouter_pwd.setPlaceholderText("Ajouter un mot de passe")
self.lineEdit_ajout_site.setPlaceholderText("Ajouter un site web")
        # Events
self.ajouter_cat.returnPressed.connect(self.check_if_exist_cat)
self.ajouter_pwd.returnPressed.connect(self.check_if_exist_pwd)
self.lineEdit_ajout_site.returnPressed.connect(self.check_new_site)
self.pushButton_ajout_site.clicked.connect(self.check_new_site)
self.sites = []
self.cats = []
self.pwds = []
def lancement(self, user_email, nom_table):
self.user_email = user_email
self.nom_table = nom_table
self.afficher_sites()
self.afficher_categories()
self.afficher_pwds()
self.setupMenu()
def setupMenu(self):
self.aide_url = "https://github.com/MindPass/Code/wiki/Aide"
self.apropos_url ="https://github.com/MindPass/Code"
self.actionObtenir_de_l_aide.triggered.connect(self.ouvrirAide)
self.actionA_propos_de_MindPass.triggered.connect(self.ouvrirApropos)
"""
self.actionMode_deux_lettres.triggered.connect(self.check_deux_lettres)
self.actionMode_complet.triggered.connect(self.check_complet)
self.menuAffichage()
"""
"""
def check_deux_lettres(self):
self.actionMode_deux_lettres.setChecked(True)
self.actionMode_complet.setChecked(False)
self.menuAffichage()
def check_complet(self):
self.actionMode_deux_lettres.setChecked(False)
self.actionMode_complet.setChecked(True)
self.menuAffichage()
def menuAffichage(self):
if(self.actionMode_deux_lettres.isChecked()):
self.affichage_deux_lettres()
else:
self.affichage_complet()
def affichage_complet(self):
for pwd in self.pwds:
pwd.update_title(pwd.nom)
for site in self.sites:
site.update_pwd_combobox(1)
def affichage_deux_lettres(self):
pass
"""
def ouvrirAide(self):
self.openURL(self.aide_url)
def ouvrirApropos(self):
self.openURL(self.apropos_url)
def openURL(self, given_url):
url = QtCore.QUrl(given_url)
if not QtGui.QDesktopServices.openUrl(url):
            QtWidgets.QMessageBox.warning(None, "Open Url", "Could not open url")
def check_if_exist_cat(self):
"""
        Check that the category is not already in the database before adding it.
"""
if self.ajouter_cat.displayText() != "":
requete = "SELECT nom_categorie FROM categories_"+self.nom_table +" WHERE nom_categorie=?"
categories_table = bdd_select(requete, (self.ajouter_cat.displayText(),))
conditions = not categories_table or categories_table[0][0] != self.ajouter_cat.displayText()
if conditions:
self.ajouter_categorie()
            # refresh the category colours
self.actualiser_couleur()
            # refresh the label colours in the Passwords column
self.actualiser_couleur_pwd()
def actualiser_couleur(self):
nb_cat = len(self.cats)
for i in range(nb_cat):
self.cats[i].setColor(i, nb_cat)
def afficher_categories(self):
requete= "SELECT nom_categorie FROM categories_"+self.nom_table
tab = bdd_select(requete)
if tab:
for k in range(len(tab)):
self.ajouter_ligne_categorie(k, tab[k][0])
self.actualiser_couleur()
def ajouter_categorie(self):
requete ="INSERT INTO categories_"+self.nom_table +" (nom_categorie) VALUES(?)"
bdd_insert(requete, (self.ajouter_cat.displayText(),))
        # add the new category to every site's combobox
for k in range(len(self.sites)):
self.sites[k].categorie.addItem(self.ajouter_cat.displayText())
        # add the category to the Categories scroll area
self.ajouter_ligne_categorie(len(self.cats), self.ajouter_cat.displayText())
print("Catégorie ajoutée : "+ str(self.ajouter_cat.displayText()))
self.ajouter_cat.setText("")
def ajouter_ligne_categorie(self, y, nom_categorie):
requete = "SELECT site_web FROM sites_reconnus_"+self.nom_table+" WHERE categorie=?"
sites_lies= toliste(bdd_select(requete, (nom_categorie,)))
self.cats.append(Categorie(y, nom_categorie, sites_lies, self , self.nom_table))
self.verticalLayout_3.addLayout(self.cats[y].ligne)
        # keep the rows aligned to the top
self.verticalLayout_3.setAlignment(QtCore.Qt.AlignTop)
def check_if_exist_pwd(self):
"""
        Check that the password is not already in the database before adding it.
"""
if self.ajouter_pwd.displayText() != "":
requete = "SELECT mdp FROM mdps_"+self.nom_table+" WHERE mdp=?"
pwds_table = bdd_select(requete, (self.ajouter_pwd.displayText(),))
conditions = not pwds_table or pwds_table[0][0] != self.ajouter_pwd.displayText()
if conditions:
self.ajouter_password()
def afficher_pwds(self):
requete= "SELECT mdp FROM mdps_"+self.nom_table+""
tab = bdd_select(requete)
if tab:
for k in range(len(tab)):
self.ajouter_ligne_pwd(k, tab[k][0])
def ajouter_password(self):
requete = "INSERT INTO mdps_"+self.nom_table+" (mdp) VALUES(?)"
bdd_insert(requete, (self.ajouter_pwd.displayText(),))
        # add the new password to every site's combobox
for k in range(len(self.sites)):
self.sites[k].mdp.addItem(self.ajouter_pwd.displayText())
        # add the password to the Passwords scroll area
self.ajouter_ligne_pwd(len(self.pwds), self.ajouter_pwd.displayText())
print("Password ajoutée : " + self.ajouter_pwd.displayText())
self.ajouter_pwd.setText("")
def ajouter_ligne_pwd(self, y, nom_pwd):
requete = "SELECT site_web FROM sites_reconnus_"+self.nom_table+" WHERE mdp=?"
sites_lies= toliste(bdd_select(requete, (nom_pwd,)))
self.pwds.append(Password(y, nom_pwd, sites_lies, self, self.nom_table))
self.verticalLayout_2.addLayout(self.pwds[y].ligne)
        # keep the rows aligned to the top
self.verticalLayout_2.setAlignment(QtCore.Qt.AlignTop)
def actualiser_couleur_pwd(self):
        # destroy the layouts inside the scroll area
self.scrollAreaWidgetContents_pwd.deleteLater()
        # clear the cached password rows
self.pwds = []
        # recreate an empty container widget
self.scrollAreaWidgetContents_pwd = QtWidgets.QWidget()
self.scrollAreaWidgetContents_pwd.setGeometry(QtCore.QRect(0, 0, 177, 767))
self.scrollAreaWidgetContents_pwd.setObjectName("scrollAreaWidgetContents_cat")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_pwd)
self.verticalLayout_2.setObjectName("verticalLayout_3")
self.scrollArea_pwd.setWidget(self.scrollAreaWidgetContents_pwd)
        # re-run the password display method
self.afficher_pwds()
def afficher_sites(self):
requete= "SELECT site_web, identifiant, mdp, categorie FROM sites_reconnus_"+self.nom_table+""
tab = bdd_select(requete)
for k in range(len(tab)):
self.sites.append(LigneSite(k,tab[k][0], tab[k][1], tab[k][2], tab[k][3], self, self.nom_table))
self.verticalLayout.addLayout(self.sites[k].ligne)
def check_new_site(self):
requete = "SELECT site_web FROM sites_reconnus_"+self.nom_table+""
sites_web = toliste(bdd_select(requete))
if(self.lineEdit_ajout_site.text() not in sites_web and self.lineEdit_ajout_site.text() != ""):
requete = "INSERT INTO sites_reconnus_"+self.nom_table+" VALUES(?,?,?,?,?)"
valeurs =("",self.lineEdit_ajout_site.text(),"", "", "")
bdd_insert(requete, valeurs)
self.sites.append(LigneSite(len(self.sites), self.lineEdit_ajout_site.text(), "", "", "", self, self.nom_table))
self.verticalLayout.addLayout(self.sites[len(self.sites)-1].ligne)
self.lineEdit_ajout_site.setText("")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
fenetreGestion = QtWidgets.QMainWindow()
classGestion = ClasseGestion(fenetreGestion)
fenetreGestion.show()
sys.exit(app.exec_())
| MindPass/Code | Interface_graphique/PyQt/application/classeGestion.py | Python | gpl-3.0 | 24,892 | 0.028687 |
'''
/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
* Junior University
* Copyright (c) 2011, 2012 Open Networking Foundation
*
* We are making the OpenFlow specification and associated documentation
* (Software) available for public use and benefit with the expectation
* that others will use, modify and enhance the Software and contribute
* those enhancements back to the community. However, since we would
* like to make the Software available for broadest use, with as few
* restrictions as possible permission is hereby granted, free of
* charge, to any person obtaining a copy of this Software to deal in
* the Software under the copyrights without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* The name and trademarks of copyright holder(s) may NOT be used in
* advertising or publicity pertaining to the Software or any
* derivatives without specific, written prior permission.
*/
Created on 2015/7/14
:author: hubo
'''
from .common import *
from . import common
from namedstruct.namedstruct import rawtype as _rawtype
from namedstruct.namedstruct import StructDefWarning
import warnings as _warnings
with _warnings.catch_warnings():
_warnings.filterwarnings('ignore', '^padding', StructDefWarning)
ofp_port_no = enum('ofp_port_no',
globals(),
uint32,
OFPP_MAX = 0xffffff00,
OFPP_IN_PORT = 0xfffffff8,
OFPP_TABLE = 0xfffffff9,
OFPP_NORMAL = 0xfffffffa,
OFPP_FLOOD = 0xfffffffb,
OFPP_ALL = 0xfffffffc,
OFPP_CONTROLLER = 0xfffffffd,
OFPP_LOCAL = 0xfffffffe,
OFPP_ANY = 0xffffffff)
ofp_error_type = ofp_error_type.extend(globals(),
OFPET_BAD_INSTRUCTION = 3, #/* Error in instruction list. */
OFPET_BAD_MATCH = 4, #/* Error in match. */
OFPET_FLOW_MOD_FAILED = 5, #/* Problem modifying flow entry. */
OFPET_GROUP_MOD_FAILED = 6, #/* Problem modifying group entry. */
OFPET_PORT_MOD_FAILED = 7, #/* Port mod request failed. */
OFPET_TABLE_MOD_FAILED = 8, #/* Table mod request failed. */
OFPET_QUEUE_OP_FAILED = 9, #/* Queue operation failed. */
OFPET_SWITCH_CONFIG_FAILED = 10, #/* Switch config request failed. */
OFPET_ROLE_REQUEST_FAILED = 11, #/* Controller Role request failed. */
OFPET_METER_MOD_FAILED = 12, #/* Error in meter. */
OFPET_TABLE_FEATURES_FAILED = 13,# /* Setting table features failed. */
OFPET_EXPERIMENTER = 0xffff #/* Experimenter error messages. */
)
ofp_type = ofp_type.extend(globals(),
OFPT_EXPERIMENTER = 4, #/* Symmetric message */
# /* Switch configuration messages. */
OFPT_FEATURES_REQUEST = 5, #/* Controller/switch message */
OFPT_FEATURES_REPLY = 6, #/* Controller/switch message */
OFPT_GET_CONFIG_REQUEST = 7, #/* Controller/switch message */
OFPT_GET_CONFIG_REPLY = 8, #/* Controller/switch message */
OFPT_SET_CONFIG = 9, #/* Controller/switch message */
# /* Asynchronous messages. */
OFPT_PACKET_IN = 10, #/* Async message */
OFPT_FLOW_REMOVED = 11, #/* Async message */
OFPT_PORT_STATUS = 12, #/* Async message */
# /* Controller command messages. */
OFPT_PACKET_OUT = 13, #/* Controller/switch message */
OFPT_FLOW_MOD = 14, #/* Controller/switch message */
OFPT_GROUP_MOD = 15, #/* Controller/switch message */
OFPT_PORT_MOD = 16, #/* Controller/switch message */
OFPT_TABLE_MOD = 17, #/* Controller/switch message */
# /* Multipart messages. */
OFPT_MULTIPART_REQUEST = 18, #/* Controller/switch message */
OFPT_MULTIPART_REPLY = 19, #/* Controller/switch message */
# /* Barrier messages. */
OFPT_BARRIER_REQUEST = 20, #/* Controller/switch message */
OFPT_BARRIER_REPLY = 21, #/* Controller/switch message */
# /* Queue Configuration messages. */
OFPT_QUEUE_GET_CONFIG_REQUEST = 22, #/* Controller/switch message */
OFPT_QUEUE_GET_CONFIG_REPLY = 23, #/* Controller/switch message */
# /* Controller role change request messages. */
OFPT_ROLE_REQUEST = 24, #/* Controller/switch message */
OFPT_ROLE_REPLY = 25, #/* Controller/switch message */
# /* Asynchronous message configuration. */
OFPT_GET_ASYNC_REQUEST = 26, #/* Controller/switch message */
OFPT_GET_ASYNC_REPLY = 27, #/* Controller/switch message */
OFPT_SET_ASYNC = 28, #/* Controller/switch message */
# /* Meters and rate limiters configuration messages. */
OFPT_METER_MOD = 29, #/* Controller/switch message */
)
ofp_type_reply_set = set([OFPT_ECHO_REPLY, OFPT_FEATURES_REPLY, OFPT_GET_CONFIG_REPLY, OFPT_MULTIPART_REPLY, OFPT_BARRIER_REPLY, OFPT_QUEUE_GET_CONFIG_REPLY, OFPT_ROLE_REPLY, OFPT_GET_ASYNC_REPLY])
ofp_type_asyncmessage_set = set([OFPT_PACKET_IN, OFPT_FLOW_REMOVED, OFPT_PORT_STATUS])
OFP_VERSION = OFP13_VERSION
ofp_msg = nstruct(name = 'ofp_msg',
base = common.ofp_msg_mutable,
criteria = lambda x: x.header.version == OFP_VERSION,
init = packvalue(OFP_VERSION, 'header', 'version'),
classifyby = (OFP_VERSION,),
classifier = lambda x: x.header.type,
extend = {('header', 'type') : ofp_type})
'''
/* Switch configuration. */
'''
ofp_switch_config = nstruct((ofp_config_flags, 'flags'),
(uint16, 'miss_send_len'),
name = 'ofp_switch_config',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_GET_CONFIG_REPLY or x.header.type == OFPT_SET_CONFIG,
classifyby = (OFPT_SET_CONFIG, OFPT_GET_CONFIG_REPLY),
init = packvalue(OFPT_SET_CONFIG, 'header','type'))
'''
/* Configure/Modify behavior of a flow table */
'''
ofp_table_mod = nstruct(
(ofp_table, 'table_id'), # /* ID of the table, OFPTT_ALL indicates all tables */
(uint8[3],), # /* Pad to 32 bits */
(ofp_table_config, 'config'), # /* Bitmap of OFPTC_* flags */
name = 'ofp_table_mod',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_TABLE_MOD,
classifyby = (OFPT_TABLE_MOD,),
init = packvalue(OFPT_TABLE_MOD, 'header', 'type')
)
'''
/* Capabilities supported by the datapath. */
'''
ofp_capabilities = ofp_capabilities.extend(globals(),
OFPC_GROUP_STATS = 1 << 3, # /* Group statistics. */
OFPC_PORT_BLOCKED = 1 << 8 # /* Switch will block looping ports. */
)
'''
/* Current state of the physical port. These are not configurable from
* the controller.
*/
'''
ofp_port_state = ofp_port_state.extend(globals(),
OFPPS_BLOCKED = 1 << 1, # /* Port is blocked */
OFPPS_LIVE = 1 << 2, # /* Live for Fast Failover Group. */
)
'''
/* Features of ports available in a datapath. */
'''
ofp_port_features = ofp_port_features.extend(
OFPPF_40GB_FD = 1 << 7, #/* 40 Gb full-duplex rate support. */
OFPPF_100GB_FD = 1 << 8, #/* 100 Gb full-duplex rate support. */
OFPPF_1TB_FD = 1 << 9, #/* 1 Tb full-duplex rate support. */
OFPPF_OTHER = 1 << 10, #/* Other rate, not in the list. */
OFPPF_COPPER = 1 << 11, #/* Copper medium. */
OFPPF_FIBER = 1 << 12, #/* Fiber medium. */
OFPPF_AUTONEG = 1 << 13, #/* Auto-negotiation. */
OFPPF_PAUSE = 1 << 14, #/* Pause. */
OFPPF_PAUSE_ASYM = 1 << 15 #/* Asymmetric pause. */
)
'''
/* Description of a port */
'''
ofp_port = nstruct(
(ofp_port_no, 'port_no'),
(uint8[4],),
(mac_addr, 'hw_addr'),
(uint8[2],), # /* Align to 64 bits. */
(char[OFP_MAX_PORT_NAME_LEN], 'name'), # /* Null-terminated */
(ofp_port_config, 'config'), # /* Bitmap of OFPPC_* flags. */
(ofp_port_state, 'state'), # /* Bitmap of OFPPS_* flags. */
# /* Bitmaps of OFPPF_* that describe features. All bits zeroed if
# * unsupported or unavailable. */
(ofp_port_features, 'curr'), # /* Current features. */
(ofp_port_features, 'advertised'), # /* Features being advertised by the port. */
(ofp_port_features, 'supported'), # /* Features supported by the port. */
(ofp_port_features, 'peer'), # /* Features advertised by peer. */
(uint32, 'curr_speed'), # /* Current port bitrate in kbps. */
(uint32, 'max_speed'), # /* Max port bitrate in kbps */
name = 'ofp_port',
inline = False
)
'''
/* Switch features. */
'''
ofp_switch_features = nstruct((uint64, 'datapath_id'),
(uint32, 'n_buffers'),
(uint8, 'n_tables'),
(uint8, 'auxiliary_id'),
(uint8[2],),
(ofp_capabilities, 'capabilities'),
(uint32,),
name = 'ofp_switch_features',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_FEATURES_REPLY,
classifyby = (OFPT_FEATURES_REPLY,),
init = packvalue(OFPT_FEATURES_REPLY, 'header', 'type'))
'''
/* A physical port has changed in the datapath */
'''
ofp_port_status = nstruct(
(ofp_port_reason, 'reason'),
(uint8[7],),
(ofp_port, 'desc'),
name= 'ofp_port_status',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PORT_STATUS,
classifyby = (OFPT_PORT_STATUS,),
init = packvalue(OFPT_PORT_STATUS, 'header', 'type')
)
'''
/* Modify behavior of the physical port */
'''
ofp_port_mod = nstruct(
(uint32, 'port_no'),
(uint8[4],),
(mac_addr, 'hw_addr'),
(uint8[2],),
(ofp_port_config, 'config'), # /* Bitmap of OFPPC_* flags. */
(ofp_port_config, 'mask'), # /* Bitmap of OFPPC_* flags to be changed. */
(ofp_port_features, 'advertise'), # /* Bitmap of "ofp_port_features"s. Zero all bits to prevent any action taking place. */
(uint8[4],), # /* Pad to 64-bits. */
name = 'ofp_port_mod',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PORT_MOD,
classifyby = (OFPT_PORT_MOD,),
init = packvalue(OFPT_PORT_MOD, 'header', 'type')
)
'''
/* ## -------------------------- ## */
/* ## OpenFlow Extensible Match. ## */
/* ## -------------------------- ## */
/* The match type indicates the match structure (set of fields that compose the
* match) in use. The match type is placed in the type field at the beginning
* of all match structures. The "OpenFlow Extensible Match" type corresponds
* to OXM TLV format described below and must be supported by all OpenFlow
* switches. Extensions that define other match types may be published on the
* ONF wiki. Support for extensions is optional.
*/
/* Fields to match against flows */
'''
ofp_match = nstruct(
(ofp_match_type, 'type'), # /* One of OFPMT_* */
(uint16, 'length'), # /* Length of ofp_match (excluding padding) */
# /* Followed by:
# * - Exactly (length - 4) (possibly 0) bytes containing OXM TLVs, then
# * - Exactly ((length + 7)/8*8 - length) (between 0 and 7) bytes of
# * all-zero bytes
# * In summary, ofp_match is padded as needed, to make its overall size
# * a multiple of 8, to preserve alignment in structures using it.
# */
name = 'ofp_match',
size = lambda x: x.length,
prepack = packrealsize('length')
)
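# Worked example (illustrative): a match carrying a single OXM_OF_IN_PORT TLV has
# length = 4 (type and length fields) + 8 (the TLV) = 12, and the serialized
# struct is then padded with (12 + 7)//8*8 - 12 = 4 zero bytes, 16 bytes in total,
# as described above.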
'''
/* Components of a OXM TLV header.
* Those macros are not valid for the experimenter class, macros for the
* experimenter class will depend on the experimenter header used. */
'''
def OXM_HEADER__(CLASS, FIELD, HASMASK, LENGTH):
return (((CLASS) << 16) | ((FIELD) << 9) | ((HASMASK) << 8) | (LENGTH))
def OXM_HEADER(CLASS, FIELD, LENGTH):
return OXM_HEADER__(CLASS, FIELD, 0, LENGTH)
def OXM_HEADER_W(CLASS, FIELD, LENGTH):
return OXM_HEADER__(CLASS, FIELD, 1, (LENGTH) * 2)
def OXM_CLASS(HEADER):
return ((HEADER) >> 16)
def OXM_FIELD(HEADER):
return (((HEADER) >> 9) & 0x7f)
def OXM_TYPE(HEADER):
return (((HEADER) >> 9) & 0x7fffff)
def OXM_HASMASK(HEADER):
return (((HEADER) >> 8) & 1)
def OXM_LENGTH(HEADER):
return ((HEADER) & 0xff)
def OXM_MAKE_WILD_HEADER(HEADER):
return OXM_HEADER_W(OXM_CLASS(HEADER), OXM_FIELD(HEADER), OXM_LENGTH(HEADER))
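# A minimal sketch (not part of the original module) of how the helpers above
# compose and decompose a header; the values follow directly from the bit layout
# (class:16 | field:7 | hasmask:1 | length:8):
#   OXM_HEADER(0x8000, 0, 4)  == 0x80000004    # == OXM_OF_IN_PORT defined below
#   OXM_CLASS(0x80000004)     == 0x8000        # OFPXMC_OPENFLOW_BASIC
#   OXM_FIELD(0x80000004)     == 0             # OFPXMT_OFB_IN_PORT
#   OXM_HASMASK(0x80000004)   == 0
#   OXM_LENGTH(0x80000004)    == 4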
'''
/* OXM Class IDs.
* The high order bit differentiate reserved classes from member classes.
* Classes 0x0000 to 0x7FFF are member classes, allocated by ONF.
* Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation.
*/
'''
ofp_oxm_class = enum('ofp_oxm_class', globals(), uint16,
OFPXMC_NXM_0 = 0x0000, # /* Backward compatibility with NXM */
OFPXMC_NXM_1 = 0x0001, # /* Backward compatibility with NXM */
OFPXMC_OPENFLOW_BASIC = 0x8000, # /* Basic class for OpenFlow */
OFPXMC_EXPERIMENTER = 0xFFFF, # /* Experimenter class */
)
'''
/* OXM Flow match field types for OpenFlow basic class. */
'''
oxm_ofb_match_fields = enum('oxm_ofb_match_fields', globals(), uint8,
OFPXMT_OFB_IN_PORT = 0, # /* Switch input port. */
OFPXMT_OFB_IN_PHY_PORT = 1, # /* Switch physical input port. */
OFPXMT_OFB_METADATA = 2, # /* Metadata passed between tables. */
OFPXMT_OFB_ETH_DST = 3, # /* Ethernet destination address. */
OFPXMT_OFB_ETH_SRC = 4, # /* Ethernet source address. */
OFPXMT_OFB_ETH_TYPE = 5, # /* Ethernet frame type. */
OFPXMT_OFB_VLAN_VID = 6, # /* VLAN id. */
OFPXMT_OFB_VLAN_PCP = 7, # /* VLAN priority. */
OFPXMT_OFB_IP_DSCP = 8, # /* IP DSCP (6 bits in ToS field). */
OFPXMT_OFB_IP_ECN = 9, # /* IP ECN (2 bits in ToS field). */
OFPXMT_OFB_IP_PROTO = 10,# /* IP protocol. */
OFPXMT_OFB_IPV4_SRC = 11,# /* IPv4 source address. */
OFPXMT_OFB_IPV4_DST = 12,# /* IPv4 destination address. */
OFPXMT_OFB_TCP_SRC = 13,# /* TCP source port. */
OFPXMT_OFB_TCP_DST = 14,# /* TCP destination port. */
OFPXMT_OFB_UDP_SRC = 15,# /* UDP source port. */
OFPXMT_OFB_UDP_DST = 16,# /* UDP destination port. */
OFPXMT_OFB_SCTP_SRC = 17,# /* SCTP source port. */
OFPXMT_OFB_SCTP_DST = 18,# /* SCTP destination port. */
OFPXMT_OFB_ICMPV4_TYPE = 19,# /* ICMP type. */
OFPXMT_OFB_ICMPV4_CODE = 20,# /* ICMP code. */
OFPXMT_OFB_ARP_OP = 21,# /* ARP opcode. */
OFPXMT_OFB_ARP_SPA = 22,# /* ARP source IPv4 address. */
OFPXMT_OFB_ARP_TPA = 23,# /* ARP target IPv4 address. */
OFPXMT_OFB_ARP_SHA = 24,# /* ARP source hardware address. */
OFPXMT_OFB_ARP_THA = 25,# /* ARP target hardware address. */
OFPXMT_OFB_IPV6_SRC = 26,# /* IPv6 source address. */
OFPXMT_OFB_IPV6_DST = 27,# /* IPv6 destination address. */
OFPXMT_OFB_IPV6_FLABEL = 28,# /* IPv6 Flow Label */
OFPXMT_OFB_ICMPV6_TYPE = 29,# /* ICMPv6 type. */
OFPXMT_OFB_ICMPV6_CODE = 30,# /* ICMPv6 code. */
OFPXMT_OFB_IPV6_ND_TARGET = 31,# /* Target address for ND. */
OFPXMT_OFB_IPV6_ND_SLL = 32,# /* Source link-layer for ND. */
OFPXMT_OFB_IPV6_ND_TLL = 33,# /* Target link-layer for ND. */
OFPXMT_OFB_MPLS_LABEL = 34,# /* MPLS label. */
OFPXMT_OFB_MPLS_TC = 35,# /* MPLS TC. */
OFPXMT_OFB_MPLS_BOS = 36,# /* MPLS BoS bit. */
OFPXMT_OFB_PBB_ISID = 37,# /* PBB I-SID. */
OFPXMT_OFB_TUNNEL_ID = 38,# /* Logical Port Metadata. */
OFPXMT_OFB_IPV6_EXTHDR = 39,# /* IPv6 Extension Header pseudo-field */
)
OFPXMT_OFB_ALL = ((1 << 40) - 1)
'''
/* The VLAN id is 12-bits, so we can use the entire 16 bits to indicate
* special conditions.
*/
'''
ofp_vlan_id = enum('ofp_vlan_id', globals(),
                   OFPVID_PRESENT = 0x1000, #/* Bit that indicates that a VLAN id is set */
OFPVID_NONE = 0x0000, #/* No VLAN id was set. */
)
'''
/* Define for compatibility */
'''
OFP_VLAN_NONE = OFPVID_NONE
ofp_oxm_header = enum('ofp_oxm_header', globals(), uint32,
#===============================================================================
# /* OpenFlow port on which the packet was received.
# * May be a physical port, a logical port, or the reserved port OFPP_LOCAL
# *
# * Prereqs: None.
# *
# * Format: 32-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IN_PORT = OXM_HEADER(0x8000, OFPXMT_OFB_IN_PORT, 4),
#===============================================================================
# /* Physical port on which the packet was received.
# *
# * Consider a packet received on a tunnel interface defined over a link
# * aggregation group (LAG) with two physical port members. If the tunnel
# * interface is the logical port bound to OpenFlow. In this case,
# * OFPXMT_OF_IN_PORT is the tunnel's port number and OFPXMT_OF_IN_PHY_PORT is
# * the physical port number of the LAG on which the tunnel is configured.
# *
# * When a packet is received directly on a physical port and not processed by a
# * logical port, OFPXMT_OF_IN_PORT and OFPXMT_OF_IN_PHY_PORT have the same
# * value.
# *
# * This field is usually not available in a regular match and only available
# * in ofp_packet_in messages when it's different from OXM_OF_IN_PORT.
# *
# * Prereqs: OXM_OF_IN_PORT must be present.
# *
# * Format: 32-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IN_PHY_PORT = OXM_HEADER(0x8000, OFPXMT_OFB_IN_PHY_PORT, 4),
#===============================================================================
# /* Table metadata.
# *
# * Prereqs: None.
# *
# * Format: 64-bit integer in network byte order.
# *
# * Masking: Arbitrary masks.
# */
#===============================================================================
OXM_OF_METADATA = OXM_HEADER(0x8000, OFPXMT_OFB_METADATA, 8),
OXM_OF_METADATA_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_METADATA, 8),
#===============================================================================
# /* Source or destination address in Ethernet header.
# *
# * Prereqs: None.
# *
# * Format: 48-bit Ethernet MAC address.
# *
# * Masking: Arbitrary masks. */
#===============================================================================
OXM_OF_ETH_DST = OXM_HEADER (0x8000, OFPXMT_OFB_ETH_DST, 6),
OXM_OF_ETH_DST_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_ETH_DST, 6),
OXM_OF_ETH_SRC = OXM_HEADER (0x8000, OFPXMT_OFB_ETH_SRC, 6),
OXM_OF_ETH_SRC_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_ETH_SRC, 6),
#===============================================================================
# /* Packet's Ethernet type.
# *
# * Prereqs: None.
# *
# * Format: 16-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_ETH_TYPE = OXM_HEADER (0x8000, OFPXMT_OFB_ETH_TYPE, 2),
#===============================================================================
# /* 802.1Q VID.
# *
# * For a packet with an 802.1Q header, this is the VLAN-ID (VID) from the
# * outermost tag, with the CFI bit forced to 1. For a packet with no 802.1Q
# * header, this has value OFPVID_NONE.
# *
# * Prereqs: None.
# *
# * Format: 16-bit integer in network byte order with bit 13 indicating
# * presence of VLAN header and 3 most-significant bits forced to 0.
# * Only the lower 13 bits have meaning.
# *
# * Masking: Arbitrary masks.
# *
# * This field can be used in various ways:
# *
# * - If it is not constrained at all, the nx_match matches packets without
# * an 802.1Q header or with an 802.1Q header that has any VID value.
# *
# * - Testing for an exact match with 0x0 matches only packets without
# * an 802.1Q header.
# *
# * - Testing for an exact match with a VID value with CFI=1 matches packets
# * that have an 802.1Q header with a specified VID.
# *
# * - Testing for an exact match with a nonzero VID value with CFI=0 does
# * not make sense. The switch may reject this combination.
# *
# * - Testing with nxm_value=0, nxm_mask=0x0fff matches packets with no 802.1Q
# * header or with an 802.1Q header with a VID of 0.
# *
# * - Testing with nxm_value=0x1000, nxm_mask=0x1000 matches packets with
# * an 802.1Q header that has any VID value.
# */
#===============================================================================
OXM_OF_VLAN_VID = OXM_HEADER (0x8000, OFPXMT_OFB_VLAN_VID, 2),
OXM_OF_VLAN_VID_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_VLAN_VID, 2),
#===============================================================================
# /* 802.1Q PCP.
# *
# * For a packet with an 802.1Q header, this is the VLAN-PCP from the
# * outermost tag. For a packet with no 802.1Q header, this has value
# * 0.
# *
# * Prereqs: OXM_OF_VLAN_VID must be different from OFPVID_NONE.
# *
# * Format: 8-bit integer with 5 most-significant bits forced to 0.
# * Only the lower 3 bits have meaning.
# *
# * Masking: Not maskable.
# */
#===============================================================================
OXM_OF_VLAN_PCP = OXM_HEADER (0x8000, OFPXMT_OFB_VLAN_PCP, 1),
#===============================================================================
# /* The Diff Serv Code Point (DSCP) bits of the IP header.
# * Part of the IPv4 ToS field or the IPv6 Traffic Class field.
# *
# * Prereqs: OXM_OF_ETH_TYPE must be either 0x0800 or 0x86dd.
# *
# * Format: 8-bit integer with 2 most-significant bits forced to 0.
# * Only the lower 6 bits have meaning.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IP_DSCP = OXM_HEADER (0x8000, OFPXMT_OFB_IP_DSCP, 1),
#===============================================================================
# /* The ECN bits of the IP header.
# * Part of the IPv4 ToS field or the IPv6 Traffic Class field.
# *
# * Prereqs: OXM_OF_ETH_TYPE must be either 0x0800 or 0x86dd.
# *
# * Format: 8-bit integer with 6 most-significant bits forced to 0.
# * Only the lower 2 bits have meaning.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IP_ECN = OXM_HEADER (0x8000, OFPXMT_OFB_IP_ECN, 1),
#===============================================================================
# /* The "protocol" byte in the IP header.
# *
# * Prereqs: OXM_OF_ETH_TYPE must be either 0x0800 or 0x86dd.
# *
# * Format: 8-bit integer.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IP_PROTO = OXM_HEADER (0x8000, OFPXMT_OFB_IP_PROTO, 1),
#===============================================================================
# /* The source or destination address in the IP header.
# *
# * Prereqs: OXM_OF_ETH_TYPE must match 0x0800 exactly.
# *
# * Format: 32-bit integer in network byte order.
# *
# * Masking: Arbitrary masks.
# */
#===============================================================================
OXM_OF_IPV4_SRC = OXM_HEADER (0x8000, OFPXMT_OFB_IPV4_SRC, 4),
OXM_OF_IPV4_SRC_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_IPV4_SRC, 4),
OXM_OF_IPV4_DST = OXM_HEADER (0x8000, OFPXMT_OFB_IPV4_DST, 4),
OXM_OF_IPV4_DST_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_IPV4_DST, 4),
#===============================================================================
# /* The source or destination port in the TCP header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must be either 0x0800 or 0x86dd.
# * OXM_OF_IP_PROTO must match 6 exactly.
# *
# * Format: 16-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_TCP_SRC = OXM_HEADER (0x8000, OFPXMT_OFB_TCP_SRC, 2),
OXM_OF_TCP_DST = OXM_HEADER (0x8000, OFPXMT_OFB_TCP_DST, 2),
#===============================================================================
# /* The source or destination port in the UDP header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match either 0x0800 or 0x86dd.
# * OXM_OF_IP_PROTO must match 17 exactly.
# *
# * Format: 16-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_UDP_SRC = OXM_HEADER (0x8000, OFPXMT_OFB_UDP_SRC, 2),
OXM_OF_UDP_DST = OXM_HEADER (0x8000, OFPXMT_OFB_UDP_DST, 2),
#===============================================================================
# /* The source or destination port in the SCTP header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match either 0x0800 or 0x86dd.
# * OXM_OF_IP_PROTO must match 132 exactly.
# *
# * Format: 16-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_SCTP_SRC = OXM_HEADER (0x8000, OFPXMT_OFB_SCTP_SRC, 2),
OXM_OF_SCTP_DST = OXM_HEADER (0x8000, OFPXMT_OFB_SCTP_DST, 2),
#===============================================================================
# /* The type or code in the ICMP header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x0800 exactly.
# * OXM_OF_IP_PROTO must match 1 exactly.
# *
# * Format: 8-bit integer.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_ICMPV4_TYPE = OXM_HEADER (0x8000, OFPXMT_OFB_ICMPV4_TYPE, 1),
OXM_OF_ICMPV4_CODE = OXM_HEADER (0x8000, OFPXMT_OFB_ICMPV4_CODE, 1),
#===============================================================================
# /* ARP opcode.
# *
# * For an Ethernet+IP ARP packet, the opcode in the ARP header. Always 0
# * otherwise.
# *
# * Prereqs: OXM_OF_ETH_TYPE must match 0x0806 exactly.
# *
# * Format: 16-bit integer in network byte order.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_ARP_OP = OXM_HEADER (0x8000, OFPXMT_OFB_ARP_OP, 2),
#===============================================================================
# /* For an Ethernet+IP ARP packet, the source or target protocol address
# * in the ARP header. Always 0 otherwise.
# *
# * Prereqs: OXM_OF_ETH_TYPE must match 0x0806 exactly.
# *
# * Format: 32-bit integer in network byte order.
# *
# * Masking: Arbitrary masks.
# */
#===============================================================================
OXM_OF_ARP_SPA = OXM_HEADER (0x8000, OFPXMT_OFB_ARP_SPA, 4),
OXM_OF_ARP_SPA_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_ARP_SPA, 4),
OXM_OF_ARP_TPA = OXM_HEADER (0x8000, OFPXMT_OFB_ARP_TPA, 4),
OXM_OF_ARP_TPA_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_ARP_TPA, 4),
#===============================================================================
# /* For an Ethernet+IP ARP packet, the source or target hardware address
# * in the ARP header. Always 0 otherwise.
# *
# * Prereqs: OXM_OF_ETH_TYPE must match 0x0806 exactly.
# *
# * Format: 48-bit Ethernet MAC address.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_ARP_SHA = OXM_HEADER (0x8000, OFPXMT_OFB_ARP_SHA, 6),
OXM_OF_ARP_THA = OXM_HEADER (0x8000, OFPXMT_OFB_ARP_THA, 6),
#===============================================================================
# /* The source or destination address in the IPv6 header.
# *
# * Prereqs: OXM_OF_ETH_TYPE must match 0x86dd exactly.
# *
# * Format: 128-bit IPv6 address.
# *
# * Masking: Arbitrary masks.
# */
#===============================================================================
OXM_OF_IPV6_SRC = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_SRC, 16),
OXM_OF_IPV6_SRC_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_IPV6_SRC, 16),
OXM_OF_IPV6_DST = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_DST, 16),
OXM_OF_IPV6_DST_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_IPV6_DST, 16),
#===============================================================================
# /* The IPv6 Flow Label
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x86dd exactly
# *
# * Format: 32-bit integer with 12 most-significant bits forced to 0.
# * Only the lower 20 bits have meaning.
# *
# * Masking: Arbitrary masks.
# */
#===============================================================================
OXM_OF_IPV6_FLABEL = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_FLABEL, 4),
OXM_OF_IPV6_FLABEL_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_IPV6_FLABEL, 4),
#===============================================================================
# /* The type or code in the ICMPv6 header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x86dd exactly.
# * OXM_OF_IP_PROTO must match 58 exactly.
# *
# * Format: 8-bit integer.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_ICMPV6_TYPE = OXM_HEADER (0x8000, OFPXMT_OFB_ICMPV6_TYPE, 1),
OXM_OF_ICMPV6_CODE = OXM_HEADER (0x8000, OFPXMT_OFB_ICMPV6_CODE, 1),
#===============================================================================
# /* The target address in an IPv6 Neighbor Discovery message.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x86dd exactly.
# * OXM_OF_IP_PROTO must match 58 exactly.
# * OXM_OF_ICMPV6_TYPE must be either 135 or 136.
# *
# * Format: 128-bit IPv6 address.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IPV6_ND_TARGET = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_ND_TARGET, 16),
#===============================================================================
# /* The source link-layer address option in an IPv6 Neighbor Discovery
# * message.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x86dd exactly.
# * OXM_OF_IP_PROTO must match 58 exactly.
# * OXM_OF_ICMPV6_TYPE must be exactly 135.
# *
# * Format: 48-bit Ethernet MAC address.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IPV6_ND_SLL = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_ND_SLL, 6),
#===============================================================================
# /* The target link-layer address option in an IPv6 Neighbor Discovery
# * message.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x86dd exactly.
# * OXM_OF_IP_PROTO must match 58 exactly.
# * OXM_OF_ICMPV6_TYPE must be exactly 136.
# *
# * Format: 48-bit Ethernet MAC address.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_IPV6_ND_TLL = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_ND_TLL, 6),
#===============================================================================
# /* The LABEL in the first MPLS shim header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x8847 or 0x8848 exactly.
# *
# * Format: 32-bit integer in network byte order with 12 most-significant
# * bits forced to 0. Only the lower 20 bits have meaning.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_MPLS_LABEL = OXM_HEADER (0x8000, OFPXMT_OFB_MPLS_LABEL, 4),
#===============================================================================
# /* The TC in the first MPLS shim header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x8847 or 0x8848 exactly.
# *
# * Format: 8-bit integer with 5 most-significant bits forced to 0.
# * Only the lower 3 bits have meaning.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_MPLS_TC = OXM_HEADER (0x8000, OFPXMT_OFB_MPLS_TC, 1),
#===============================================================================
# /* The BoS bit in the first MPLS shim header.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x8847 or 0x8848 exactly.
# *
# * Format: 8-bit integer with 7 most-significant bits forced to 0.
#    * Only the lowest bit has meaning.
# *
# * Masking: Not maskable. */
#===============================================================================
OXM_OF_MPLS_BOS = OXM_HEADER (0x8000, OFPXMT_OFB_MPLS_BOS, 1),
#===============================================================================
# /* IEEE 802.1ah I-SID.
# *
# * For a packet with a PBB header, this is the I-SID from the
# * outermost service tag.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x88E7 exactly.
# *
# * Format: 24-bit integer in network byte order.
# *
# * Masking: Arbitrary masks. */
#===============================================================================
OXM_OF_PBB_ISID = OXM_HEADER (0x8000, OFPXMT_OFB_PBB_ISID, 3),
OXM_OF_PBB_ISID_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_PBB_ISID, 3),
#===============================================================================
# /* Logical Port Metadata.
# *
# * Metadata associated with a logical port.
# * If the logical port performs encapsulation and decapsulation, this
# * is the demultiplexing field from the encapsulation header.
# * For example, for a packet received via GRE tunnel including a (32-bit) key,
# * the key is stored in the low 32-bits and the high bits are zeroed.
# * For a MPLS logical port, the low 20 bits represent the MPLS Label.
# * For a VxLAN logical port, the low 24 bits represent the VNI.
# * If the packet is not received through a logical port, the value is 0.
# *
# * Prereqs: None.
# *
# * Format: 64-bit integer in network byte order.
# *
# * Masking: Arbitrary masks. */
#===============================================================================
OXM_OF_TUNNEL_ID = OXM_HEADER (0x8000, OFPXMT_OFB_TUNNEL_ID, 8),
OXM_OF_TUNNEL_ID_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_TUNNEL_ID, 8),
#===============================================================================
# /* The IPv6 Extension Header pseudo-field.
# *
# * Prereqs:
# * OXM_OF_ETH_TYPE must match 0x86dd exactly
# *
# * Format: 16-bit integer with 7 most-significant bits forced to 0.
# * Only the lower 9 bits have meaning.
# *
# * Masking: Maskable. */
#===============================================================================
OXM_OF_IPV6_EXTHDR = OXM_HEADER (0x8000, OFPXMT_OFB_IPV6_EXTHDR, 2),
OXM_OF_IPV6_EXTHDR_W = OXM_HEADER_W(0x8000, OFPXMT_OFB_IPV6_EXTHDR, 2)
)
'''
/* Bit definitions for IPv6 Extension Header pseudo-field. */
'''
ofp_ipv6exthdr_flags = enum('ofp_ipv6exthdr_flags', globals(), uint16, True,
OFPIEH_NONEXT = 1 << 0, # /* "No next header" encountered. */
OFPIEH_ESP = 1 << 1, # /* Encrypted Sec Payload header present. */
OFPIEH_AUTH = 1 << 2, # /* Authentication header present. */
OFPIEH_DEST = 1 << 3, # /* 1 or 2 dest headers present. */
OFPIEH_FRAG = 1 << 4, # /* Fragment header present. */
OFPIEH_ROUTER = 1 << 5, # /* Router header present. */
OFPIEH_HOP = 1 << 6, # /* Hop-by-hop header present. */
OFPIEH_UNREP = 1 << 7, # /* Unexpected repeats encountered. */
OFPIEH_UNSEQ = 1 << 8 # /* Unexpected sequencing encountered. */
)
ofp_oxm = nstruct(
(ofp_oxm_header, 'header'),
name = 'ofp_oxm',
padding = 1,
size = lambda x: OXM_LENGTH(x.header) + 4
)
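# Worked example (illustrative): an OXM_OF_IPV4_SRC TLV reports OXM_LENGTH == 4,
# so the struct above covers 4 (header) + 4 (payload) = 8 bytes; the masked
# variant OXM_OF_IPV4_SRC_W reports OXM_LENGTH == 8 and therefore occupies
# 12 bytes (value plus mask).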
'''
/* Header for OXM experimenter match fields.
* The experimenter class should not use OXM_HEADER() macros for defining
* fields due to this extra header. */
'''
ofp_oxm_experimenter = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same form as in struct ofp_experimenter_header. */
base = ofp_oxm,
name = 'ofp_oxm_experimenter',
criteria = lambda x: OXM_CLASS(x.header) == OFPXMC_EXPERIMENTER,
init = packvalue(OXM_HEADER(OFPXMC_EXPERIMENTER, 0, 4), 'header')
)
ofp_oxm_nomask = nstruct(
(hexraw, 'value'),
base = ofp_oxm,
criteria = lambda x: OXM_CLASS(x.header) != OFPXMC_EXPERIMENTER and not OXM_HASMASK(x.header),
init = packvalue(OXM_OF_IN_PORT, 'header'),
name = 'ofp_oxm_nomask'
)
_ofp_oxm_mask_value = nstruct(
(hexraw, 'value'),
name = 'ofp_oxm_mask_value',
size = lambda x: OXM_LENGTH(x.header) // 2,
padding = 1
)
ofp_oxm_mask = nstruct(
(_ofp_oxm_mask_value,),
(hexraw, 'mask'),
base = ofp_oxm,
criteria = lambda x: OXM_CLASS(x.header) != OFPXMC_EXPERIMENTER and OXM_HASMASK(x.header),
init = packvalue(OXM_OF_METADATA_W, 'header'),
name = 'ofp_oxm_mask',
)
def create_oxm(header, value = None, mask = None):
if OXM_HASMASK(header):
oxm = ofp_oxm_mask.new()
size = OXM_LENGTH(header) // 2
else:
oxm = ofp_oxm_nomask.new()
size = OXM_LENGTH(header)
oxm.header = header
oxm.value = create_binary(value, size)
if OXM_HASMASK(header):
oxm.mask = create_binary(mask, size)
oxm._pack()
oxm._autosubclass()
return oxm
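# Illustrative usage sketch (not part of the original module); it assumes
# create_binary() accepts raw byte strings, as used elsewhere in this package:
#
#   # exact match on ingress port 1
#   in_port = create_oxm(OXM_OF_IN_PORT, b'\x00\x00\x00\x01')
#   # masked match on IPv4 source 10.0.0.0/24
#   ip_src = create_oxm(OXM_OF_IPV4_SRC_W, b'\x0a\x00\x00\x00', b'\xff\xff\xff\x00')
#
# The resulting TLVs can then be placed in the oxm_fields list of the
# ofp_match_oxm struct defined just below.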
ofp_match_oxm = nstruct(
(ofp_oxm[0], 'oxm_fields'),
base = ofp_match,
criteria = lambda x: x.type == OFPMT_OXM,
init = packvalue(OFPMT_OXM, 'type'),
name = 'ofp_match_oxm'
)
ofp_oxm_mask_ipv4 = nstruct(name = 'ofp_oxm_mask_ipv4',
base = ofp_oxm_mask,
criteria = lambda x: x.header in (OXM_OF_IPV4_SRC_W, OXM_OF_IPV4_DST_W, OXM_OF_ARP_SPA_W, OXM_OF_ARP_TPA_W),
init = packvalue(OXM_OF_IPV4_SRC_W, 'header'),
extend = {'value' : ip4_addr_bytes, 'mask' : ip4_addr_bytes}
)
ofp_oxm_nomask_ipv4 = nstruct(name = 'ofp_oxm_nomask_ipv4',
base = ofp_oxm_nomask,
criteria = lambda x: x.header in (OXM_OF_IPV4_SRC, OXM_OF_IPV4_DST, OXM_OF_ARP_SPA, OXM_OF_ARP_TPA),
init = packvalue(OXM_OF_IPV4_SRC, 'header'),
extend = {'value' : ip4_addr_bytes}
)
ofp_oxm_mask_eth = nstruct(name = 'ofp_oxm_mask_eth',
base = ofp_oxm_mask,
criteria = lambda x: x.header in (OXM_OF_ETH_SRC_W, OXM_OF_ETH_DST_W),
init = packvalue(OXM_OF_ETH_SRC_W, 'header'),
extend = {'value' : mac_addr_bytes, 'mask' : mac_addr_bytes})
ofp_oxm_nomask_eth = nstruct(name = 'ofp_oxm_nomask_eth',
base = ofp_oxm_nomask,
criteria = lambda x: x.header in (OXM_OF_ETH_SRC, OXM_OF_ETH_DST, OXM_OF_IPV6_ND_SLL, OXM_OF_IPV6_ND_TLL, OXM_OF_ARP_SHA, OXM_OF_ARP_THA),
init = packvalue(OXM_OF_ETH_SRC, 'header'),
extend = {'value' : mac_addr_bytes})
ofp_port_no_raw = _rawtype()
ofp_port_no_raw.formatter = lambda x: ofp_port_no.formatter(ofp_port_no.parse(x)[0])
ofp_oxm_nomask_port = nstruct(name = 'ofp_oxm_nomask_port',
base = ofp_oxm_nomask,
criteria = lambda x: x.header == OXM_OF_IN_PORT,
init = packvalue(OXM_OF_IN_PORT, 'header'),
extend = {'value': ofp_port_no_raw}
)
ofp_ipv6exthdr_flags_raw = _rawtype()
ofp_ipv6exthdr_flags_raw.formatter = lambda x: ofp_ipv6exthdr_flags.formatter(ofp_ipv6exthdr_flags.parse(x)[0])
ofp_oxm_nomask_exthdr = nstruct(name = 'ofp_oxm_nomask_exthdr',
base = ofp_oxm_nomask,
criteria = lambda x: x.header == OXM_OF_IPV6_EXTHDR,
init = packvalue(OXM_OF_IPV6_EXTHDR, 'header'),
extend = {'value': ofp_ipv6exthdr_flags_raw})
ofp_oxm_mask_exthdr = nstruct(name = 'ofp_oxm_mask_exthdr',
base = ofp_oxm_mask,
criteria = lambda x: x.header == OXM_OF_IPV6_EXTHDR_W,
init = packvalue(OXM_OF_IPV6_EXTHDR_W, 'header'),
extend = {'value': ofp_ipv6exthdr_flags_raw, 'mask': ofp_ipv6exthdr_flags_raw})
ethtype_raw = _rawtype()
ethtype_raw.formatter = lambda x: ethertype.formatter(ethertype.parse(x)[0])
ofp_oxm_nomask_ethertype = nstruct(name = 'ofp_oxm_nomask_ethertype',
base = ofp_oxm_nomask,
criteria = lambda x: x.header == OXM_OF_ETH_TYPE,
init = packvalue(OXM_OF_ETH_TYPE, 'header'),
extend = {'value': ethtype_raw})
arpop_raw = _rawtype()
arpop_raw.formatter = lambda x: arp_op_code.formatter(arp_op_code.parse(x)[0])
ofp_oxm_nomask_arpopcode = nstruct(name = 'ofp_oxm_nomask_arpopcode',
base = ofp_oxm_nomask,
criteria = lambda x: x.header == OXM_OF_ARP_OP,
init = packvalue(OXM_OF_ARP_OP, 'header'),
extend = {'value': arpop_raw})
ip_protocol_raw = _rawtype()
ip_protocol_raw.formatter = lambda x: ip_protocol.formatter(ip_protocol.parse(x)[0])
ofp_oxm_nomask_ip_protocol = nstruct(name = 'ofp_oxm_nomask_ip_protocol',
base = ofp_oxm_nomask,
criteria = lambda x: x.header == OXM_OF_IP_PROTO,
init = packvalue(OXM_OF_IP_PROTO, 'header'),
extend = {'value': ip_protocol_raw})
ofp_oxm_nomask_ipv6 = nstruct(name = 'ofp_oxm_nomask_ipv6',
base = ofp_oxm_nomask,
criteria = lambda x: x.header in (OXM_OF_IPV6_SRC, OXM_OF_IPV6_DST, OXM_OF_IPV6_ND_TARGET),
init = packvalue(OXM_OF_IPV6_SRC, 'header'),
extend = {'value': ip6_addr_bytes})
ofp_oxm_mask_ipv6 = nstruct(name = 'ofp_oxm_mask_ipv6',
base = ofp_oxm_mask,
                    criteria = lambda x: x.header in (OXM_OF_IPV6_SRC_W, OXM_OF_IPV6_DST_W),
                    init = packvalue(OXM_OF_IPV6_SRC_W, 'header'),
extend = {'value': ip6_addr_bytes, 'mask': ip6_addr_bytes})
'''
/* ## ----------------- ## */
/* ## OpenFlow Actions. ## */
/* ## ----------------- ## */
'''
ofp_action_type = enum('ofp_action_type', globals(), uint16,
OFPAT_OUTPUT = 0, #/* Output to switch port. */
OFPAT_COPY_TTL_OUT = 11, #/* Copy TTL "outwards" -- from next-to-outermost to outermost */
OFPAT_COPY_TTL_IN = 12, #/* Copy TTL "inwards" -- from outermost to next-to-outermost */
OFPAT_SET_MPLS_TTL = 15, #/* MPLS TTL */
OFPAT_DEC_MPLS_TTL = 16, #/* Decrement MPLS TTL */
OFPAT_PUSH_VLAN = 17, #/* Push a new VLAN tag */
OFPAT_POP_VLAN = 18, #/* Pop the outer VLAN tag */
OFPAT_PUSH_MPLS = 19, #/* Push a new MPLS tag */
OFPAT_POP_MPLS = 20, #/* Pop the outer MPLS tag */
OFPAT_SET_QUEUE = 21, #/* Set queue id when outputting to a port */
OFPAT_GROUP = 22, #/* Apply group. */
OFPAT_SET_NW_TTL = 23, #/* IP TTL. */
OFPAT_DEC_NW_TTL = 24, #/* Decrement IP TTL. */
OFPAT_SET_FIELD = 25, #/* Set a header field using OXM TLV format. */
OFPAT_PUSH_PBB = 26, #/* Push a new PBB service tag (I-TAG) */
OFPAT_POP_PBB = 27, #/* Pop the outer PBB service tag (I-TAG) */
OFPAT_EXPERIMENTER = 0xffff
)
'''
/* Action header that is common to all actions. The length includes the
* header and any padding used to make the action 64-bit aligned.
* NB: The length of an action *must* always be a multiple of eight. */
'''
ofp_action = nstruct((ofp_action_type, 'type'),
(uint16, 'len'),
name = 'ofp_action',
size = lambda x: x.len,
prepack = packsize('len'),
classifier = lambda x: x.type
)
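# A note on the pattern used below (sketch of how this file applies namedstruct
# subclassing): the base struct's classifier returns x.type, and every subclass
# lists its matching type value(s) in classifyby together with a criteria
# predicate, so a parsed ofp_action is automatically promoted to, e.g.,
# ofp_action_output when type == OFPAT_OUTPUT.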
ofp_controller_max_len = enum('ofp_controller_max_len', globals(), uint16,
OFPCML_MAX = 0xffe5, #/* maximum max_len value which can be used to request a specific byte length. */
OFPCML_NO_BUFFER = 0xffff #/* indicates that no buffering should be applied and the whole packet is to be sent to the controller. */
)
'''
/* Action structure for OFPAT_OUTPUT, which sends packets out 'port'.
* When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
* number of bytes to send. A 'max_len' of zero means no bytes of the
* packet should be sent. A 'max_len' of OFPCML_NO_BUFFER means that
* the packet is not buffered and the complete packet is to be sent to
* the controller. */
'''
ofp_action_output = nstruct((ofp_port_no, 'port'),
(ofp_controller_max_len, 'max_len'),
(uint8[6],),
name = 'ofp_action_output',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_OUTPUT,
classifyby = (OFPAT_OUTPUT,),
init = packvalue(OFPAT_OUTPUT, 'type'))
'''
/* Action structure for OFPAT_SET_MPLS_TTL. */
'''
ofp_action_mpls_ttl = nstruct(
(uint8, 'mpls_ttl'), # /* MPLS TTL */
(uint8[3],),
name = 'ofp_action_mpls_ttl',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_MPLS_TTL,
classifyby = (OFPAT_SET_MPLS_TTL,),
init = packvalue(OFPAT_SET_MPLS_TTL, 'type')
)
'''
/* Action structure for OFPAT_PUSH_VLAN/MPLS/PBB. */
'''
ofp_action_push = nstruct(
(ethertype, 'ethertype'), # /* Ethertype */
(uint8[2],),
name = 'ofp_action_push',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_PUSH_VLAN or x.type == OFPAT_PUSH_MPLS or x.type == OFPAT_PUSH_PBB,
classifyby = (OFPAT_PUSH_VLAN, OFPAT_PUSH_MPLS, OFPAT_PUSH_PBB),
init = packvalue(OFPAT_PUSH_VLAN, 'type')
)
'''
/* Action structure for OFPAT_POP_MPLS. */
'''
ofp_action_pop_mpls = nstruct(
(ethertype, 'ethertype'), # /* Ethertype */
(uint8[2],),
name = 'ofp_action_pop_mpls',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_POP_MPLS,
classifyby = (OFPAT_POP_MPLS,),
init = packvalue(OFPAT_POP_MPLS, 'type')
)
'''
/* Action structure for OFPAT_GROUP. */
'''
ofp_action_group = nstruct(
(uint32, 'group_id'), # /* Group identifier. */
name = 'ofp_action_group',
base = ofp_action,
criteria = lambda x: x.type == OFPAT_GROUP,
classifyby = (OFPAT_GROUP,),
init = packvalue(OFPAT_GROUP, 'type')
)
'''
/* Action structure for OFPAT_SET_NW_TTL. */
'''
ofp_action_nw_ttl = nstruct(
(uint8, 'nw_ttl'), # /* IP TTL */
(uint8[3],),
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_NW_TTL,
classifyby = (OFPAT_SET_NW_TTL,),
init = packvalue(OFPAT_SET_NW_TTL, 'type'),
name = 'ofp_action_nw_ttl'
)
'''
/* Action structure for OFPAT_SET_FIELD. */
'''
ofp_action_set_field = nstruct(
(ofp_oxm, 'field'),
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_FIELD,
classifyby = (OFPAT_SET_FIELD,),
init = packvalue(OFPAT_SET_FIELD, 'type'),
name = 'ofp_action_set_field'
)
'''
/* Action header for OFPAT_EXPERIMENTER.
* The rest of the body is experimenter-defined. */
'''
ofp_action_experimenter = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same
# form as in struct
# ofp_experimenter_header. */
base = ofp_action,
criteria = lambda x: x.type == OFPAT_EXPERIMENTER,
classifyby = (OFPAT_EXPERIMENTER,),
init = packvalue(OFPAT_EXPERIMENTER, 'type'),
name = 'ofp_action_experimenter'
)
'''
/* ## ---------------------- ## */
/* ## OpenFlow Instructions. ## */
/* ## ---------------------- ## */
'''
ofp_instruction_type = enum('ofp_instruction_type', globals(), uint16,
OFPIT_GOTO_TABLE = 1, # /* Setup the next table in the lookup pipeline */
OFPIT_WRITE_METADATA = 2, # /* Setup the metadata field for use later in pipeline */
OFPIT_WRITE_ACTIONS = 3, # /* Write the action(s) onto the datapath action set */
OFPIT_APPLY_ACTIONS = 4, # /* Applies the action(s) immediately */
OFPIT_CLEAR_ACTIONS = 5, # /* Clears all actions from the datapath action set */
OFPIT_METER = 6, # /* Apply meter (rate limiter) */
OFPIT_EXPERIMENTER = 0xFFFF # /* Experimenter instruction */
)
'''
/* Instruction header that is common to all instructions. The length includes
* the header and any padding used to make the instruction 64-bit aligned.
* NB: The length of an instruction *must* always be a multiple of eight. */
'''
ofp_instruction = nstruct(
(ofp_instruction_type, 'type'), # /* Instruction type */
(uint16, 'len'), # /* Length of this struct in bytes. */
name = 'ofp_instruction',
size = lambda x: x.len,
prepack = packsize('len'),
classifier = lambda x: x.type
)
'''
/* Instruction structure for OFPIT_GOTO_TABLE */
'''
ofp_instruction_goto_table = nstruct(
(uint8, 'table_id'), # /* Set next table in the lookup pipeline */
(uint8[3],), # /* Pad to 64 bits. */
base = ofp_instruction,
name = 'ofp_instruction_goto_table',
criteria = lambda x: x.type == OFPIT_GOTO_TABLE,
classifyby = (OFPIT_GOTO_TABLE,),
init = packvalue(OFPIT_GOTO_TABLE, 'type')
)
'''
/* Instruction structure for OFPIT_WRITE_METADATA */
'''
ofp_instruction_write_metadata = nstruct(
(uint8[4],), # /* Align to 64-bits */
(uint64, 'metadata'), # /* Metadata value to write */
(uint64, 'metadata_mask'), # /* Metadata write bitmask */
base = ofp_instruction,
name = 'ofp_instruction_write_metadata',
criteria = lambda x: x.type == OFPIT_WRITE_METADATA,
classifyby = (OFPIT_WRITE_METADATA,),
init = packvalue(OFPIT_WRITE_METADATA, 'type')
)
'''
/* Instruction structure for OFPIT_WRITE/APPLY/CLEAR_ACTIONS */
'''
ofp_instruction_actions = nstruct(
(uint8[4],), # /* Align to 64-bits */
(ofp_action[0], 'actions'), # /* 0 or more actions associated with OFPIT_WRITE_ACTIONS and OFPIT_APPLY_ACTIONS */
base = ofp_instruction,
name = 'ofp_instruction_actions',
criteria = lambda x: x.type == OFPIT_WRITE_ACTIONS or x.type == OFPIT_APPLY_ACTIONS or x.type == OFPIT_CLEAR_ACTIONS,
classifyby = (OFPIT_WRITE_ACTIONS, OFPIT_APPLY_ACTIONS, OFPIT_CLEAR_ACTIONS),
init = packvalue(OFPIT_APPLY_ACTIONS, 'type')
)
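# Usage sketch: wrapping actions in an OFPIT_APPLY_ACTIONS instruction. A
# minimal illustration assuming variable-length array fields (declared as
# type[0]) are exposed as Python lists, as in vlcp's namedstruct.
def _example_apply_actions_instruction():
    ins = ofp_instruction_actions()  # 'type' defaults to OFPIT_APPLY_ACTIONS
    out = ofp_action_output()
    out.port = OFPP_CONTROLLER
    out.max_len = OFPCML_NO_BUFFER
    ins.actions.append(out)          # 0 or more actions
    return ins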
'''
/* Instruction structure for OFPIT_METER */
'''
ofp_instruction_meter = nstruct(
(uint32, 'meter_id'), # /* Meter instance. */
base = ofp_instruction,
name = 'ofp_instruction_meter',
criteria = lambda x: x.type == OFPIT_METER,
classifyby = (OFPIT_METER,),
init = packvalue(OFPIT_METER, 'type')
)
'''
/* Instruction structure for experimental instructions */
'''
ofp_instruction_experimenter = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same form as in struct ofp_experimenter_header. */
# /* Experimenter-defined arbitrary additional data. */
base = ofp_instruction,
name = 'ofp_instruction_experimenter',
criteria = lambda x: x.type == OFPIT_EXPERIMENTER,
classifyby = (OFPIT_EXPERIMENTER,),
init = packvalue(OFPIT_EXPERIMENTER, 'type')
)
'''
/* Value used in "idle_timeout" and "hard_timeout" to indicate that the entry
* is permanent. */
'''
OFP_FLOW_PERMANENT = 0
'''
/* By default, choose a priority in the middle. */
'''
OFP_DEFAULT_PRIORITY = 0x8000
ofp_flow_mod_flags = ofp_flow_mod_flags.extend(globals(),
OFPFF_RESET_COUNTS = 1 << 2, # /* Reset flow packet and byte counts. */
OFPFF_NO_PKT_COUNTS = 1 << 3, # /* Don't keep track of packet count. */
OFPFF_NO_BYT_COUNTS = 1 << 4, # /* Don't keep track of byte count. */
)
'''
/* Special buffer-id to indicate 'no buffer' */
'''
ofp_buffer_id = enum('ofp_buffer_id', globals(), uint32,
OFP_NO_BUFFER = 0xffffffff
)
'''
/* Flow setup and teardown (controller -> datapath). */
'''
ofp_flow_mod = nstruct(
(uint64, 'cookie'), # /* Opaque controller-issued identifier. */
# /* Mask used to restrict the cookie bits
# that must match when the command is
# OFPFC_MODIFY* or OFPFC_DELETE*. A value
# of 0 indicates no restriction. */
(uint64, 'cookie_mask'),
# /* ID of the table to put the flow in.
# For OFPFC_DELETE_* commands, OFPTT_ALL
# can also be used to delete matching
# flows from all tables. */
(ofp_table, 'table_id'),
(ofp_flow_mod_command.astype(uint8), 'command'), # /* One of OFPFC_*. */
(uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */
(uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */
(uint16, 'priority'), # /* Priority level of flow entry. */
# /* Buffered packet to apply to, or
# OFP_NO_BUFFER.
# Not meaningful for OFPFC_DELETE*. */
(ofp_buffer_id, 'buffer_id'),
# /* For OFPFC_DELETE* commands, require
# matching entries to include this as an
# output port. A value of OFPP_ANY
# indicates no restriction. */
(ofp_port_no, 'out_port'),
# /* For OFPFC_DELETE* commands, require
# matching entries to include this as an
# output group. A value of OFPG_ANY
# indicates no restriction. */
(ofp_group, 'out_group'),
(ofp_flow_mod_flags, 'flags'), # /* Bitmap of OFPFF_* flags. */
(uint8[2],),
(ofp_match, 'match'), # /* Fields to match. Variable size. */
# /* The variable size and padded match is always followed by instructions. */
# /* Instruction set - 0 or more.
# The length of the instruction
# set is inferred from the
# length field in the header. */
(ofp_instruction[0], 'instructions'),
base = ofp_msg,
name = 'ofp_flow_mod',
criteria = lambda x: x.header.type == OFPT_FLOW_MOD,
classifyby = (OFPT_FLOW_MOD,),
init = packvalue(OFPT_FLOW_MOD, 'header', 'type')
)
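# Usage sketch: a minimal OFPFC_ADD flow entry for table 0 that sends every
# matching packet to the controller. Illustration only, assuming the
# attribute/list behaviour of vlcp's namedstruct and the OFPFC_*/OFPP_*/OFPG_*
# constants defined earlier in this module; serialization (e.g. _tobytes())
# follows whatever packing API the surrounding project uses.
def _example_flow_mod():
    fm = ofp_flow_mod()                # header.type preset to OFPT_FLOW_MOD
    fm.command = OFPFC_ADD
    fm.table_id = 0
    fm.priority = OFP_DEFAULT_PRIORITY
    fm.buffer_id = OFP_NO_BUFFER       # no buffered packet to apply to
    fm.out_port = OFPP_ANY             # out_port/out_group only restrict DELETE*
    fm.out_group = OFPG_ANY
    ins = ofp_instruction_actions()    # defaults to OFPIT_APPLY_ACTIONS
    act = ofp_action_output()
    act.port = OFPP_CONTROLLER
    act.max_len = OFPCML_NO_BUFFER
    ins.actions.append(act)
    fm.instructions.append(ins)
    # leaving fm.match empty (no OXM TLVs) wildcards every packet
    return fm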
'''
/* Group commands */
'''
ofp_group_mod_command = enum('ofp_group_mod_command', globals(),uint16,
OFPGC_ADD = 0, # /* New group. */
OFPGC_MODIFY = 1, # /* Modify all matching groups. */
OFPGC_DELETE = 2, # /* Delete all matching groups. */
)
'''
/* Bucket for use in groups. */
'''
ofp_bucket = nstruct(
(uint16, 'len'),
# /* Length of the bucket in bytes, including
# this header and any padding to make it
# 64-bit aligned. */
(uint16, 'weight'),
# /* Relative weight of bucket. Only
# defined for select groups. */
(ofp_port_no, 'watch_port'),
# /* Port whose state affects whether this
# bucket is live. Only required for fast
# failover groups. */
(ofp_group, 'watch_group'),
# /* Group whose state affects whether this
# bucket is live. Only required for fast
# failover groups. */
(uint8[4],),
(ofp_action[0], 'actions'),
# /* 0 or more actions associated with
# the bucket - The action list length
# is inferred from the length
# of the bucket. */
size = lambda x: x.len,
prepack = packsize('len'),
init = lambda x: (packvalue(OFPP_ANY, 'watch_port')(x), packvalue(OFPG_ANY, 'watch_group')(x)),
name = 'ofp_bucket'
)
'''
/* Group types. Values in the range [128, 255] are reserved for experimental
* use. */
'''
ofp_group_type = enum('ofp_group_type', globals(),uint8,
OFPGT_ALL = 0, # /* All (multicast/broadcast) group. */
OFPGT_SELECT = 1, # /* Select group. */
OFPGT_INDIRECT = 2, # /* Indirect group. */
OFPGT_FF = 3, # /* Fast failover group. */
)
'''
/* Group setup and teardown (controller -> datapath). */
'''
ofp_group_mod = nstruct(
(ofp_group_mod_command, 'command'), # /* One of OFPGC_*. */
(ofp_group_type, 'type'), # /* One of OFPGT_*. */
(uint8,), # /* Pad to 64 bits. */
(ofp_group, 'group_id'), # /* Group identifier. */
(ofp_bucket[0], 'buckets'), # /* The length of the bucket array is inferred from the length field in the header. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_GROUP_MOD,
classifyby = (OFPT_GROUP_MOD,),
init = packvalue(OFPT_GROUP_MOD, 'header', 'type'),
name = 'ofp_group_mod'
)
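# Usage sketch: an OFPGC_ADD group of type ALL with a single bucket that
# outputs to port 2. Assumes the list behaviour of array fields as above;
# watch_port/watch_group default to OFPP_ANY/OFPG_ANY via the bucket's init.
def _example_group_mod():
    gm = ofp_group_mod()              # header.type preset to OFPT_GROUP_MOD
    gm.command = OFPGC_ADD
    gm.type = OFPGT_ALL               # every live bucket gets a copy of the packet
    gm.group_id = 1
    bucket = ofp_bucket()
    out = ofp_action_output()
    out.port = 2                      # hypothetical port number
    bucket.actions.append(out)
    gm.buckets.append(bucket)
    return gm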
'''
/* Send packet (controller -> datapath). */
'''
def _ofp_packet_out_actions_packsize(x):
x.actions_len = x._realsize() - 8
ofp_packet_out_actions = nstruct(
(uint16, 'actions_len'),
(uint8[6],),
(ofp_action[0], 'actions'),
name = 'ofp_packet_out_actions',
size = lambda x: x.actions_len + 8,
prepack = _ofp_packet_out_actions_packsize,
padding = 1)
ofp_packet_out = nstruct(
(ofp_buffer_id, 'buffer_id'), # /* ID assigned by datapath (OFP_NO_BUFFER if none). */
(ofp_port_no, 'in_port'), # /* Packet's input port or OFPP_CONTROLLER. */
(ofp_packet_out_actions,),
(raw, 'data'),
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PACKET_OUT,
classifyby = (OFPT_PACKET_OUT,),
init = packvalue(OFPT_PACKET_OUT, 'header', 'type'),
name = 'ofp_packet_out'
)
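# Usage sketch: an OFPT_PACKET_OUT carrying a raw Ethernet frame and flooding
# it. Assumes the anonymous ofp_packet_out_actions embedding exposes 'actions'
# directly on the message and that OFPP_FLOOD is the reserved flood port
# defined earlier in this module; 'raw_frame' is a hypothetical bytes object.
def _example_packet_out(raw_frame):
    po = ofp_packet_out()             # header.type preset to OFPT_PACKET_OUT
    po.buffer_id = OFP_NO_BUFFER      # the full frame travels in 'data'
    po.in_port = OFPP_CONTROLLER
    act = ofp_action_output()
    act.port = OFPP_FLOOD
    po.actions.append(act)
    po.data = raw_frame
    return po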
'''
/* Packet received on port (datapath -> controller). */
'''
ofp_packet_in = nstruct(
(ofp_buffer_id, 'buffer_id'), # /* ID assigned by datapath. */
(uint16, 'total_len'), # /* Full length of frame. */
(ofp_packet_in_reason, 'reason'), # /* Reason packet is being sent (one of OFPR_*) */
(uint8, 'table_id'), # /* ID of the table that was looked up */
(uint64, 'cookie'), # /* Cookie of the flow entry that was looked up. */
(ofp_match, 'match'), # /* Packet metadata. Variable size. */
# /* The variable size and padded match is always followed by:
# - Exactly 2 all-zero padding bytes, then
# - An Ethernet frame whose length is inferred from header.length.
# The padding bytes preceding the Ethernet frame ensure that the IP
# header (if any) following the Ethernet header is 32-bit aligned.
#*/
(uint8[2],), # /* Align to 64 bit + 16 bit */
(raw, 'data'), # /* Ethernet frame */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_PACKET_IN,
classifyby = (OFPT_PACKET_IN,),
init = packvalue(OFPT_PACKET_IN, 'header', 'type'),
name = 'ofp_packet_in'
)
def get_oxm(fields, header):
v = [o.value for o in fields if o.header == header]
if v:
return v[0]
else:
return create_binary(0, OXM_LENGTH(header))
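# Usage sketch for get_oxm(): extract the raw in-port bytes from a parsed
# OFPT_PACKET_IN. Assumes the ofp_match defined earlier in this module exposes
# its OXM TLVs as 'oxm_fields' and that OXM_OF_IN_PORT is the in-port OXM
# header constant defined above; when the field is absent, get_oxm returns an
# all-zero value of the right length.
def _example_packet_in_in_port(packet_in_msg):
    return get_oxm(packet_in_msg.match.oxm_fields, OXM_OF_IN_PORT)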
'''
/* Flow removed (datapath -> controller). */
'''
ofp_flow_removed = nstruct(
(uint64, 'cookie'), # /* Opaque controller-issued identifier. */
(uint16, 'priority'), # /* Priority level of flow entry. */
(ofp_flow_removed_reason, 'reason'), # /* One of OFPRR_*. */
(uint8, 'table_id'), # /* ID of the table */
(uint32, 'duration_sec'), # /* Time flow was alive in seconds. */
(uint32, 'duration_nsec'),# /* Time flow was alive in nanoseconds beyond duration_sec. */
(uint16, 'idle_timeout'), # /* Idle timeout from original flow mod. */
(uint16, 'hard_timeout'), # /* Hard timeout from original flow mod. */
(uint64, 'packet_count'),
(uint64, 'byte_count'),
(ofp_match, 'match'), # /* Description of fields. Variable size. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_FLOW_REMOVED,
classifyby = (OFPT_FLOW_REMOVED,),
init = packvalue(OFPT_FLOW_REMOVED, 'header', 'type'),
name = 'ofp_flow_removed'
)
'''
/* Meter numbering. Flow meters can use any number up to OFPM_MAX. */
'''
ofp_meter = enum('ofp_meter', globals(),uint32,
# /* Virtual meters. */
OFPM_SLOWPATH = 0xfffffffd, # /* Meter for slow datapath. */
OFPM_CONTROLLER = 0xfffffffe, # /* Meter for controller connection. */
OFPM_ALL = 0xffffffff, # /* Represents all meters for stat requests commands. */
)
# /* Last usable meter. */
OFPM_MAX = 0xffff0000
'''
/* Meter band types */
'''
ofp_meter_band_type = enum('ofp_meter_band_type', globals(), uint16,
OFPMBT_DROP = 1, # /* Drop packet. */
OFPMBT_DSCP_REMARK = 2, # /* Remark DSCP in the IP header. */
OFPMBT_EXPERIMENTER = 0xFFFF # /* Experimenter meter band. */
)
'''
/* Common header for all meter bands */
'''
ofp_meter_band = nstruct(
(ofp_meter_band_type, 'type'), # /* One of OFPMBT_*. */
(uint16, 'len'), # /* Length in bytes of this band. */
(uint32, 'rate'), # /* Rate for this band. */
(uint32, 'burst_size'), # /* Size of bursts. */
size = lambda x: x.len,
prepack = packsize('len'),
name = 'ofp_meter_band',
classifier = lambda x: x.type
)
'''
/* OFPMBT_DROP band - drop packets */
'''
ofp_meter_band_drop = nstruct(
(uint8[4],),
base = ofp_meter_band,
criteria = lambda x: x.type == OFPMBT_DROP,
classifyby = (OFPMBT_DROP,),
init = packvalue(OFPMBT_DROP, 'type'),
name = 'ofp_meter_band_drop'
)
'''
/* OFPMBT_DSCP_REMARK band - Remark DSCP in the IP header */
'''
ofp_meter_band_dscp_remark = nstruct(
(uint8, 'prec_level'), # /* Number of drop precedence level to add. */
(uint8[3],),
base = ofp_meter_band,
criteria = lambda x: x.type == OFPMBT_DSCP_REMARK,
classifyby = (OFPMBT_DSCP_REMARK,),
init = packvalue(OFPMBT_DSCP_REMARK, 'type'),
name = 'ofp_meter_band_dscp_remark'
)
'''
/* OFPMBT_EXPERIMENTER band - Experimenter type.
* The rest of the band is experimenter-defined. */
'''
ofp_meter_band_experimenter = nstruct(
#/* Experimenter ID which takes the same
# form as in struct
# ofp_experimenter_header. */
(experimenter_ids, 'experimenter'),
base = ofp_meter_band,
criteria = lambda x: x.type == OFPMBT_EXPERIMENTER,
classifyby = (OFPMBT_EXPERIMENTER,),
init = packvalue(OFPMBT_EXPERIMENTER, 'type'),
name = 'ofp_meter_band_experimenter'
)
'''
/* Meter commands */
'''
ofp_meter_mod_command = enum('ofp_meter_mod_command', globals(),uint16,
OFPMC_ADD = 0, # /* New meter. */
OFPMC_MODIFY = 1, # /* Modify specified meter. */
OFPMC_DELETE = 2, # /* Delete specified meter. */
)
'''
/* Meter configuration flags */
'''
ofp_meter_flags = enum('ofp_meter_flags', globals(),uint16,
OFPMF_KBPS = 1 << 0, # /* Rate value in kb/s (kilo-bit per second). */
OFPMF_PKTPS = 1 << 1, # /* Rate value in packet/sec. */
OFPMF_BURST = 1 << 2, # /* Do burst size. */
OFPMF_STATS = 1 << 3, # /* Collect statistics. */
)
'''
/* Meter configuration. OFPT_METER_MOD. */
'''
ofp_meter_mod = nstruct(
(ofp_meter_mod_command, 'command'), # /* One of OFPMC_*. */
(ofp_meter_flags, 'flags'), # /* Bitmap of OFPMF_* flags. */
(ofp_meter, 'meter_id'), # /* Meter instance. */
(ofp_meter_band[0], 'bands'),
#/* The band list length is inferred from the length field in the header. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_METER_MOD,
classifyby = (OFPT_METER_MOD,),
init = packvalue(OFPT_METER_MOD, 'header', 'type'),
name = 'ofp_meter_mod'
)
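# Usage sketch: an OFPMC_ADD meter with a single drop band limiting traffic to
# roughly 10 Mb/s (OFPMF_KBPS rates are in kb/s). Same namedstruct-style
# construction assumptions as the sketches above.
def _example_meter_mod():
    mm = ofp_meter_mod()              # header.type preset to OFPT_METER_MOD
    mm.command = OFPMC_ADD
    mm.flags = OFPMF_KBPS
    mm.meter_id = 1
    band = ofp_meter_band_drop()      # 'type' preset to OFPMBT_DROP
    band.rate = 10000                 # kb/s
    band.burst_size = 0
    mm.bands.append(band)
    return mm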
'''
/* ofp_error_msg 'code' values for OFPET_BAD_INSTRUCTION. 'data' contains at least
* the first 64 bytes of the failed request. */
'''
ofp_bad_instruction_code = enum('ofp_bad_instruction_code', globals(), uint16,
OFPBIC_UNKNOWN_INST = 0, #/* Unknown instruction. */
OFPBIC_UNSUP_INST = 1, #/* Switch or table does not support the
# instruction. */
OFPBIC_BAD_TABLE_ID = 2, #/* Invalid Table-ID specified. */
OFPBIC_UNSUP_METADATA = 3, #/* Metadata value unsupported by datapath. */
OFPBIC_UNSUP_METADATA_MASK = 4, #/* Metadata mask value unsupported by
# datapath. */
OFPBIC_BAD_EXPERIMENTER = 5, #/* Unknown experimenter id specified. */
OFPBIC_BAD_EXP_TYPE = 6, #/* Unknown instruction for experimenter id. */
OFPBIC_BAD_LEN = 7, #/* Length problem in instructions. */
OFPBIC_EPERM = 8, #/* Permissions error. */
)
'''
/* ofp_error_msg 'code' values for OFPET_BAD_MATCH. 'data' contains at least
* the first 64 bytes of the failed request. */
'''
ofp_bad_match_code = enum('ofp_bad_match_code', globals(), uint16,
OFPBMC_BAD_TYPE = 0, # /* Unsupported match type specified by the
# match */
OFPBMC_BAD_LEN = 1, # /* Length problem in match. */
OFPBMC_BAD_TAG = 2, # /* Match uses an unsupported tag/encap. */
OFPBMC_BAD_DL_ADDR_MASK = 3, # /* Unsupported datalink addr mask - switch
# does not support arbitrary datalink
# address mask. */
OFPBMC_BAD_NW_ADDR_MASK = 4, # /* Unsupported network addr mask - switch
# does not support arbitrary network
# address mask. */
OFPBMC_BAD_WILDCARDS = 5, # /* Unsupported combination of fields masked
# or omitted in the match. */
OFPBMC_BAD_FIELD = 6, # /* Unsupported field type in the match. */
OFPBMC_BAD_VALUE = 7, # /* Unsupported value in a match field. */
OFPBMC_BAD_MASK = 8, # /* Unsupported mask specified in the match,
# field is not dl-address or nw-address. */
OFPBMC_BAD_PREREQ = 9, # /* A prerequisite was not met. */
OFPBMC_DUP_FIELD = 10,# /* A field type was duplicated. */
OFPBMC_EPERM = 11,# /* Permissions error. */
)
'''
/* ofp_error_msg 'code' values for OFPET_FLOW_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_flow_mod_failed_code = enum('ofp_flow_mod_failed_code', globals(), uint16,
OFPFMFC_UNKNOWN = 0, # /* Unspecified error. */
OFPFMFC_TABLE_FULL = 1, # /* Flow not added because table was full. */
OFPFMFC_BAD_TABLE_ID = 2, # /* Table does not exist */
OFPFMFC_OVERLAP = 3, # /* Attempted to add overlapping flow with
# CHECK_OVERLAP flag set. */
OFPFMFC_EPERM = 4, # /* Permissions error. */
OFPFMFC_BAD_TIMEOUT = 5, # /* Flow not added because of unsupported
# idle/hard timeout. */
OFPFMFC_BAD_COMMAND = 6, # /* Unsupported or unknown command. */
OFPFMFC_BAD_FLAGS = 7, # /* Unsupported or unknown flags. */
)
'''
/* ofp_error_msg 'code' values for OFPET_GROUP_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_group_mod_failed_code = enum('ofp_group_mod_failed_code', globals(), uint16,
OFPGMFC_GROUP_EXISTS = 0, # /* Group not added because a group ADD
# attempted to replace an
# already-present group. */
OFPGMFC_INVALID_GROUP = 1, # /* Group not added because Group
# specified is invalid. */
OFPGMFC_WEIGHT_UNSUPPORTED = 2, # /* Switch does not support unequal load
# sharing with select groups. */
OFPGMFC_OUT_OF_GROUPS = 3, # /* The group table is full. */
OFPGMFC_OUT_OF_BUCKETS = 4, # /* The maximum number of action buckets
# for a group has been exceeded. */
OFPGMFC_CHAINING_UNSUPPORTED = 5, # /* Switch does not support groups that
# forward to groups. */
OFPGMFC_WATCH_UNSUPPORTED = 6, # /* This group cannot watch the watch_port
# or watch_group specified. */
OFPGMFC_LOOP = 7, # /* Group entry would cause a loop. */
OFPGMFC_UNKNOWN_GROUP = 8, # /* Group not modified because a group
# MODIFY attempted to modify a
# non-existent group. */
OFPGMFC_CHAINED_GROUP = 9, # /* Group not deleted because another
# group is forwarding to it. */
OFPGMFC_BAD_TYPE = 10,# /* Unsupported or unknown group type. */
OFPGMFC_BAD_COMMAND = 11,# /* Unsupported or unknown command. */
OFPGMFC_BAD_BUCKET = 12,# /* Error in bucket. */
OFPGMFC_BAD_WATCH = 13,# /* Error in watch port/group. */
OFPGMFC_EPERM = 14,# /* Permissions error. */
)
'''
/* ofp_error_msg 'code' values for OFPET_PORT_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_port_mod_failed_code = enum('ofp_port_mod_failed_code', globals(), uint16,
OFPPMFC_BAD_PORT = 0, # /* Specified port number does not exist. */
OFPPMFC_BAD_HW_ADDR = 1, # /* Specified hardware address does not
# * match the port number. */
OFPPMFC_BAD_CONFIG = 2, # /* Specified config is invalid. */
OFPPMFC_BAD_ADVERTISE = 3, # /* Specified advertise is invalid. */
OFPPMFC_EPERM = 4, # /* Permissions error. */
)
'''
/* ofp_error_msg 'code' values for OFPET_TABLE_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_table_mod_failed_code = enum('ofp_table_mod_failed_code', globals(), uint16,
OFPTMFC_BAD_TABLE = 0, # /* Specified table does not exist. */
OFPTMFC_BAD_CONFIG = 1, # /* Specified config is invalid. */
OFPTMFC_EPERM = 2, # /* Permissions error. */
)
'''
/* ofp_error msg 'code' values for OFPET_QUEUE_OP_FAILED. 'data' contains
* at least the first 64 bytes of the failed request */
'''
ofp_queue_op_failed_code = enum('ofp_queue_op_failed_code', globals(), uint16,
OFPQOFC_BAD_PORT = 0, # /* Invalid port (or port does not exist). */
OFPQOFC_BAD_QUEUE = 1, # /* Queue does not exist. */
OFPQOFC_EPERM = 2, # /* Permissions error. */
)
'''
/* ofp_error_msg 'code' values for OFPET_SWITCH_CONFIG_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_switch_config_failed_code = enum('ofp_switch_config_failed_code', globals(), uint16,
OFPSCFC_BAD_FLAGS = 0, # /* Specified flags is invalid. */
OFPSCFC_BAD_LEN = 1, # /* Specified len is invalid. */
OFPSCFC_EPERM = 2, # /* Permissions error. */
)
'''
/* ofp_error_msg 'code' values for OFPET_ROLE_REQUEST_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_role_request_failed_code = enum('ofp_role_request_failed_code', globals(), uint16,
OFPRRFC_STALE = 0, # /* Stale Message: old generation_id. */
OFPRRFC_UNSUP = 1, # /* Controller role change unsupported. */
OFPRRFC_BAD_ROLE = 2, # /* Invalid role. */
)
'''
/* ofp_error_msg 'code' values for OFPET_METER_MOD_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_meter_mod_failed_code = enum('ofp_meter_mod_failed_code', globals(), uint16,
OFPMMFC_UNKNOWN = 0, # /* Unspecified error. */
OFPMMFC_METER_EXISTS = 1, # /* Meter not added because a Meter ADD
# * attempted to replace an existing Meter. */
OFPMMFC_INVALID_METER = 2, # /* Meter not added because Meter specified
# * is invalid,
# * or invalid meter in meter action. */
OFPMMFC_UNKNOWN_METER = 3, # /* Meter not modified because a Meter MODIFY
# * attempted to modify a non-existent Meter,
# * or bad meter in meter action. */
OFPMMFC_BAD_COMMAND = 4, # /* Unsupported or unknown command. */
OFPMMFC_BAD_FLAGS = 5, # /* Flag configuration unsupported. */
OFPMMFC_BAD_RATE = 6, # /* Rate unsupported. */
OFPMMFC_BAD_BURST = 7, # /* Burst size unsupported. */
OFPMMFC_BAD_BAND = 8, # /* Band unsupported. */
OFPMMFC_BAD_BAND_VALUE = 9,# /* Band value unsupported. */
OFPMMFC_OUT_OF_METERS = 10,# /* No more meters available. */
OFPMMFC_OUT_OF_BANDS = 11,# /* The maximum number of properties
# * for a meter has been exceeded. */
)
'''
/* ofp_error_msg 'code' values for OFPET_TABLE_FEATURES_FAILED. 'data' contains
* at least the first 64 bytes of the failed request. */
'''
ofp_table_features_failed_code = enum('ofp_table_features_failed_code', globals(), uint16,
OFPTFFC_BAD_TABLE = 0, # /* Specified table does not exist. */
OFPTFFC_BAD_METADATA = 1, # /* Invalid metadata mask. */
OFPTFFC_BAD_TYPE = 2, # /* Unknown property type. */
OFPTFFC_BAD_LEN = 3, # /* Length problem in properties. */
OFPTFFC_BAD_ARGUMENT = 4, # /* Unsupported property value. */
OFPTFFC_EPERM = 5, # /* Permissions error. */
)
ofp_multipart_type = enum('ofp_multipart_type', globals(), uint16,
# /* Description of this OpenFlow switch.
# * The request body is empty.
# * The reply body is struct ofp_desc. */
OFPMP_DESC = 0,
# /* Individual flow statistics.
# * The request body is struct ofp_flow_stats_request.
# * The reply body is an array of struct ofp_flow_stats. */
OFPMP_FLOW = 1,
# /* Aggregate flow statistics.
# * The request body is struct ofp_aggregate_stats_request.
# * The reply body is struct ofp_aggregate_stats_reply. */
OFPMP_AGGREGATE = 2,
# /* Flow table statistics.
# * The request body is empty.
# * The reply body is an array of struct ofp_table_stats. */
OFPMP_TABLE = 3,
# /* Port statistics.
# * The request body is struct ofp_port_stats_request.
# * The reply body is an array of struct ofp_port_stats. */
OFPMP_PORT_STATS = 4,
# /* Queue statistics for a port
# * The request body is struct ofp_queue_stats_request.
# * The reply body is an array of struct ofp_queue_stats */
OFPMP_QUEUE = 5,
# /* Group counter statistics.
# * The request body is struct ofp_group_stats_request.
# * The reply is an array of struct ofp_group_stats. */
OFPMP_GROUP = 6,
# /* Group description.
# * The request body is empty.
# * The reply body is an array of struct ofp_group_desc. */
OFPMP_GROUP_DESC = 7,
# /* Group features.
# * The request body is empty.
# * The reply body is struct ofp_group_features. */
OFPMP_GROUP_FEATURES = 8,
# /* Meter statistics.
# * The request body is struct ofp_meter_multipart_requests.
# * The reply body is an array of struct ofp_meter_stats. */
OFPMP_METER = 9,
# /* Meter configuration.
# * The request body is struct ofp_meter_multipart_requests.
# * The reply body is an array of struct ofp_meter_config. */
OFPMP_METER_CONFIG = 10,
# /* Meter features.
# * The request body is empty.
# * The reply body is struct ofp_meter_features. */
OFPMP_METER_FEATURES = 11,
# /* Table features.
# * The request body is either empty or contains an array of
# * struct ofp_table_features containing the controller's
# * desired view of the switch. If the switch is unable to
# * set the specified view an error is returned.
# * The reply body is an array of struct ofp_table_features. */
OFPMP_TABLE_FEATURES = 12,
# /* Port description.
# * The request body is empty.
# * The reply body is an array of struct ofp_port. */
OFPMP_PORT_DESC = 13,
# /* Experimenter extension.
# * The request and reply bodies begin with
# * struct ofp_experimenter_multipart_header.
# * The request and reply bodies are otherwise experimenter-defined. */
OFPMP_EXPERIMENTER = 0xffff
)
'''
/* Backward compatibility with 1.3.1 - avoid breaking the API. */
'''
ofp_multipart_types = ofp_multipart_type
ofp_multipart_request_flags = enum('ofp_multipart_request_flags', globals(), uint16, True,
OFPMPF_REQ_MORE = 1 << 0 # /* More requests to follow. */
)
ofp_multipart_request = nstruct(
(ofp_multipart_type, 'type'), # /* One of the OFPMP_* constants. */
(ofp_multipart_request_flags, 'flags'), # /* OFPMPF_REQ_* flags. */
(uint8[4],),
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_MULTIPART_REQUEST,
classifyby = (OFPT_MULTIPART_REQUEST,),
init = packvalue(OFPT_MULTIPART_REQUEST, 'header', 'type'),
classifier = lambda x: x.type,
name = 'ofp_multipart_request'
)
ofp_multipart_reply_flags = enum('ofp_multipart_reply_flags', globals(), uint16, True,
OFPMPF_REPLY_MORE = 1 << 0 # /* More replies to follow. */
)
ofp_multipart_reply = nstruct(
(ofp_multipart_type, 'type'), # /* One of the OFPMP_* constants. */
(ofp_multipart_reply_flags, 'flags'), # /* OFPMPF_REPLY_* flags. */
(uint8[4],),
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_MULTIPART_REPLY,
classifyby = (OFPT_MULTIPART_REPLY,),
init = packvalue(OFPT_MULTIPART_REPLY, 'header', 'type'),
classifier = lambda x: x.type,
name = 'ofp_multipart_reply'
)
DESC_STR_LEN = 256
SERIAL_NUM_LEN = 32
'''
/* Body of reply to OFPMP_DESC request. Each entry is a NULL-terminated
* ASCII string. */
'''
ofp_desc = nstruct(
(char[DESC_STR_LEN], 'mfr_desc'), # /* Manufacturer description. */
(char[DESC_STR_LEN], 'hw_desc'), # /* Hardware description. */
(char[DESC_STR_LEN], 'sw_desc'), # /* Software description. */
(char[SERIAL_NUM_LEN], 'serial_num'),# /* Serial number. */
(char[DESC_STR_LEN], 'dp_desc'), # /* Human readable description of datapath. */
name = 'ofp_desc'
)
ofp_desc_reply = nstruct(
(ofp_desc,),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_DESC,
classifyby = (OFPMP_DESC,),
init = packvalue(OFPMP_DESC, 'type'),
name = 'ofp_desc_reply'
)
'''
/* Body for ofp_multipart_request of type OFPMP_FLOW. */
'''
ofp_flow_stats_request = nstruct(
(ofp_table, 'table_id'), # /* ID of table to read (from ofp_table_stats),
# OFPTT_ALL for all tables. */
(uint8[3],), # /* Align to 32 bits. */
(ofp_port_no, 'out_port'), # /* Require matching entries to include this
# as an output port. A value of OFPP_ANY
# indicates no restriction. */
(ofp_group, 'out_group'), # /* Require matching entries to include this
# as an output group. A value of OFPG_ANY
# indicates no restriction. */
(uint8[4],), # /* Align to 64 bits. */
(uint64, 'cookie'), # /* Require matching entries to contain this
# cookie value */
(uint64, 'cookie_mask'),# /* Mask used to restrict the cookie bits that
# must match. A value of 0 indicates
# no restriction. */
(ofp_match, 'match'), # /* Fields to match. Variable size. */
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_FLOW,
classifyby = (OFPMP_FLOW,),
init = packvalue(OFPMP_FLOW, 'type'),
name = 'ofp_flow_stats_request'
)
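# Usage sketch: an unrestricted OFPMP_FLOW request covering every table and
# every flow. Assumes OFPTT_ALL/OFPP_ANY/OFPG_ANY are the wildcard constants
# defined earlier in this module; leaving 'match' empty matches all entries.
def _example_flow_stats_request():
    req = ofp_flow_stats_request()    # multipart type preset to OFPMP_FLOW
    req.table_id = OFPTT_ALL
    req.out_port = OFPP_ANY
    req.out_group = OFPG_ANY
    req.cookie_mask = 0               # 0 = no cookie restriction
    return req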
'''
/* Body of reply to OFPMP_FLOW request. */
'''
ofp_flow_stats = nstruct(
(uint16, 'length'), # /* Length of this entry. */
(uint8, 'table_id'), # /* ID of table flow came from. */
(uint8,),
(uint32, 'duration_sec'), # /* Time flow has been alive in seconds. */
(uint32, 'duration_nsec'), # /* Time flow has been alive in nanoseconds beyond
# duration_sec. */
(uint16, 'priority'), # /* Priority of the entry. */
(uint16, 'idle_timeout'), # /* Number of seconds idle before expiration. */
(uint16, 'hard_timeout'), # /* Number of seconds before expiration. */
(ofp_flow_mod_flags, 'flags'), # /* Bitmap of OFPFF_* flags. */
(uint8[4],), # /* Align to 64-bits. */
(uint64, 'cookie'), # /* Opaque controller-issued identifier. */
(uint64, 'packet_count'), # /* Number of packets in flow. */
(uint64, 'byte_count'), # /* Number of bytes in flow. */
(ofp_match, 'match'), # /* Description of fields. Variable size. */
# /* The variable size and padded match is always followed by instructions. */
(ofp_instruction[0], 'instructions'), # /* Instruction set - 0 or more. */
name = 'ofp_flow_stats',
size = lambda x: x.length,
prepack = packsize('length')
)
ofp_flow_stats_reply = nstruct(
(ofp_flow_stats[0], 'stats'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_FLOW,
classifyby = (OFPMP_FLOW,),
init = packvalue(OFPMP_FLOW, 'type'),
name = 'ofp_flow_stats_reply'
)
'''
/* Body for ofp_multipart_request of type OFPMP_AGGREGATE. */
'''
ofp_aggregate_stats_request = nstruct(
(ofp_table, 'table_id'), # /* ID of table to read (from ofp_table_stats)
# OFPTT_ALL for all tables. */
(uint8[3],), # /* Align to 32 bits. */
(ofp_port_no, 'out_port'), # /* Require matching entries to include this
# as an output port. A value of OFPP_ANY
# indicates no restriction. */
(ofp_group, 'out_group'), # /* Require matching entries to include this
# as an output group. A value of OFPG_ANY
# indicates no restriction. */
(uint8[4],), # /* Align to 64 bits. */
(uint64, 'cookie'), # /* Require matching entries to contain this
# cookie value */
(uint64, 'cookie_mask'), # /* Mask used to restrict the cookie bits that
# must match. A value of 0 indicates
# no restriction. */
(ofp_match, 'match'), # /* Fields to match. Variable size. */
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_AGGREGATE,
classifyby = (OFPMP_AGGREGATE,),
init = packvalue(OFPMP_AGGREGATE, 'type'),
name = 'ofp_aggregate_stats_request'
)
'''
/* Body of reply to OFPMP_AGGREGATE request. */
'''
ofp_aggregate_stats_reply = nstruct(
(uint64, 'packet_count'), # /* Number of packets in flows. */
(uint64, 'byte_count'), # /* Number of bytes in flows. */
(uint32, 'flow_count'), # /* Number of flows. */
(uint8[4],), # /* Align to 64 bits. */
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_AGGREGATE,
classifyby = (OFPMP_AGGREGATE,),
init = packvalue(OFPMP_AGGREGATE, 'type'),
name = 'ofp_aggregate_stats_reply'
)
'''
/* Table Feature property types.
* Low order bit cleared indicates a property for a regular Flow Entry.
* Low order bit set indicates a property for the Table-Miss Flow Entry.
*/
'''
ofp_table_feature_prop_type = enum('ofp_table_feature_prop_type', globals(), uint16,
OFPTFPT_INSTRUCTIONS = 0, # /* Instructions property. */
OFPTFPT_INSTRUCTIONS_MISS = 1, # /* Instructions for table-miss. */
OFPTFPT_NEXT_TABLES = 2, # /* Next Table property. */
OFPTFPT_NEXT_TABLES_MISS = 3, # /* Next Table for table-miss. */
OFPTFPT_WRITE_ACTIONS = 4, # /* Write Actions property. */
OFPTFPT_WRITE_ACTIONS_MISS = 5, # /* Write Actions for table-miss. */
OFPTFPT_APPLY_ACTIONS = 6, # /* Apply Actions property. */
OFPTFPT_APPLY_ACTIONS_MISS = 7, # /* Apply Actions for table-miss. */
OFPTFPT_MATCH = 8, # /* Match property. */
OFPTFPT_WILDCARDS = 10,# /* Wildcards property. */
OFPTFPT_WRITE_SETFIELD = 12,# /* Write Set-Field property. */
OFPTFPT_WRITE_SETFIELD_MISS = 13,# /* Write Set-Field for table-miss. */
OFPTFPT_APPLY_SETFIELD = 14,# /* Apply Set-Field property. */
OFPTFPT_APPLY_SETFIELD_MISS = 15,# /* Apply Set-Field for table-miss. */
OFPTFPT_EXPERIMENTER = 0xFFFE,# /* Experimenter property. */
OFPTFPT_EXPERIMENTER_MISS = 0xFFFF,# /* Experimenter for table-miss. */
)
'''
/* Common header for all Table Feature Properties */
'''
ofp_table_feature_prop = nstruct(
(ofp_table_feature_prop_type, 'type'), # /* One of OFPTFPT_*. */
(uint16, 'length'), # /* Length in bytes of this property. */
name = 'ofp_table_feature_prop',
size = lambda x: x.length,
prepack = packrealsize('length'),
classifier = lambda x: x.type
)
ofp_instruction_feature = nstruct(
(ofp_instruction_type, 'type'), # /* Instruction type */
(uint16, 'len'), # /* Length of this struct in bytes. */
name = 'ofp_instruction_feature',
size = lambda x: x.len,
prepack = packsize('len'),
padding = 1
)
ofp_instruction_experimenter_feature = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same form as in struct ofp_experimenter_header. */
# /* Experimenter-defined arbitrary additional data. */
base = ofp_instruction_feature,
name = 'ofp_instruction_experimenter_feature',
criteria = lambda x: x.type == OFPIT_EXPERIMENTER,
init = packvalue(OFPIT_EXPERIMENTER, 'type')
)
'''
/* Instructions property */
'''
ofp_table_feature_prop_instructions = nstruct(
(ofp_instruction_feature[0], 'instruction_ids'), # /* List of instructions */
name = 'ofp_table_feature_prop_instructions',
base = ofp_table_feature_prop,
criteria = lambda x: x.type == OFPTFPT_INSTRUCTIONS or x.type == OFPTFPT_INSTRUCTIONS_MISS,
classifyby = (OFPTFPT_INSTRUCTIONS, OFPTFPT_INSTRUCTIONS_MISS),
init = packvalue(OFPTFPT_INSTRUCTIONS, 'type')
)
'''
/* Next Tables property */
'''
ofp_table_feature_prop_next_tables = nstruct(
(uint8[0], 'next_table_ids'), # /* List of table ids. */
base = ofp_table_feature_prop,
name = 'ofp_table_feature_prop_next_tables',
criteria = lambda x: x.type == OFPTFPT_NEXT_TABLES or x.type == OFPTFPT_NEXT_TABLES_MISS,
classifyby = (OFPTFPT_NEXT_TABLES, OFPTFPT_NEXT_TABLES_MISS),
init = packvalue(OFPTFPT_NEXT_TABLES, 'type')
)
ofp_action_desc = nstruct((ofp_action_type, 'type'),
(uint16, 'len'),
name = 'ofp_action_desc',
size = lambda x: x.len,
prepack = packsize('len'),
padding = 1
)
ofp_action_experimenter_desc = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same
# form as in struct
# ofp_experimenter_header. */
base = ofp_action_desc,
criteria = lambda x: x.type == OFPAT_EXPERIMENTER,
init = packvalue(OFPAT_EXPERIMENTER, 'type'),
name = 'ofp_action_experimenter_desc'
)
'''
/* Actions property */
'''
ofp_table_feature_prop_actions = nstruct(
# /* Followed by:
# * - Exactly (length - 4) bytes containing the action_ids, then
# * - Exactly (length + 7)/8*8 - (length) (between 0 and 7)
# * bytes of all-zero bytes */
(ofp_action_desc[0], 'action_ids'), # /* List of actions */
base = ofp_table_feature_prop,
criteria = lambda x: x.type in (OFPTFPT_WRITE_ACTIONS, OFPTFPT_WRITE_ACTIONS_MISS, OFPTFPT_APPLY_ACTIONS, OFPTFPT_APPLY_ACTIONS_MISS),
classifyby = (OFPTFPT_WRITE_ACTIONS, OFPTFPT_WRITE_ACTIONS_MISS, OFPTFPT_APPLY_ACTIONS, OFPTFPT_APPLY_ACTIONS_MISS),
init = packvalue(OFPTFPT_APPLY_ACTIONS, 'type'),
name = 'ofp_table_feature_prop_actions'
)
'''
/* Match, Wildcard or Set-Field property */
'''
ofp_table_feature_prop_oxm = nstruct(
# uint16_t type; /* One of OFPTFPT_MATCH,
# OFPTFPT_WILDCARDS,
# OFPTFPT_WRITE_SETFIELD,
# OFPTFPT_WRITE_SETFIELD_MISS,
# OFPTFPT_APPLY_SETFIELD,
# OFPTFPT_APPLY_SETFIELD_MISS. */
# uint16_t length; /* Length in bytes of this property. */
# /* Followed by:
# * - Exactly (length - 4) bytes containing the oxm_ids, then
# * - Exactly (length + 7)/8*8 - (length) (between 0 and 7)
# * bytes of all-zero bytes */
(ofp_oxm_header[0], 'oxm_ids'), # /* Array of OXM headers */
name = 'ofp_table_feature_prop_oxm',
base = ofp_table_feature_prop,
criteria = lambda x: x.type in (OFPTFPT_MATCH, OFPTFPT_WILDCARDS, OFPTFPT_WRITE_SETFIELD, OFPTFPT_WRITE_SETFIELD_MISS, OFPTFPT_APPLY_SETFIELD, OFPTFPT_APPLY_SETFIELD_MISS),
classifyby = (OFPTFPT_MATCH, OFPTFPT_WILDCARDS, OFPTFPT_WRITE_SETFIELD, OFPTFPT_WRITE_SETFIELD_MISS, OFPTFPT_APPLY_SETFIELD, OFPTFPT_APPLY_SETFIELD_MISS),
init = packvalue(OFPTFPT_MATCH, 'type')
)
'''
/* Experimenter table feature property */
'''
ofp_table_feature_prop_experimenter = nstruct(
# uint16_t type; /* One of OFPTFPT_EXPERIMENTER,
# OFPTFPT_EXPERIMENTER_MISS. */
# uint16_t length; /* Length in bytes of this property. */
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same
# form as in struct
# ofp_experimenter_header. */
(uint32, 'exp_type'), # /* Experimenter defined. */
# /* Followed by:
# * - Exactly (length - 12) bytes containing the experimenter data, then
# * - Exactly (length + 7)/8*8 - (length) (between 0 and 7)
# * bytes of all-zero bytes */
(uint32[0], 'experimenter_data'),
name = 'ofp_table_feature_prop_experimenter',
base = ofp_table_feature_prop,
criteria = lambda x: x.type == OFPTFPT_EXPERIMENTER or x.type == OFPTFPT_EXPERIMENTER_MISS,
classifyby = (OFPTFPT_EXPERIMENTER, OFPTFPT_EXPERIMENTER_MISS),
init = packvalue(OFPTFPT_EXPERIMENTER, 'type')
)
'''
/* Body for ofp_multipart_request of type OFPMP_TABLE_FEATURES.
* Body of reply to OFPMP_TABLE_FEATURES request. */
'''
ofp_table_features = nstruct(
(uint16, 'length'), # /* Length is padded to 64 bits. */
(uint8, 'table_id'), # /* Identifier of table. Lower numbered tables
# are consulted first. */
(uint8[5],), # /* Align to 64-bits. */
(char[OFP_MAX_TABLE_NAME_LEN], 'name'),
(uint64, 'metadata_match'), #/* Bits of metadata table can match. */
(uint64, 'metadata_write'), #/* Bits of metadata table can write. */
(ofp_table_config, 'config'), #/* Bitmap of OFPTC_* values */
(uint32, 'max_entries'), #/* Max number of entries supported. */
# /* Table Feature Property list */
(ofp_table_feature_prop[0], 'properties'), # /* List of properties */
name = 'ofp_table_features',
size = lambda x: x.length,
prepack = packrealsize('length')
)
ofp_table_features_request = nstruct(
(ofp_table_features[0], 'features'),
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_TABLE_FEATURES,
classifyby = (OFPMP_TABLE_FEATURES,),
init = packvalue(OFPMP_TABLE_FEATURES, 'type'),
name = 'ofp_table_features_request'
)
ofp_table_features_reply = nstruct(
(ofp_table_features[0], 'features'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_TABLE_FEATURES,
classifyby = (OFPMP_TABLE_FEATURES,),
init = packvalue(OFPMP_TABLE_FEATURES, 'type'),
name = 'ofp_table_features_reply'
)
'''
/* Body of reply to OFPMP_TABLE request. */
'''
ofp_table_stats = nstruct(
(uint8, 'table_id'), # /* Identifier of table. Lower numbered tables
# are consulted first. */
(uint8[3],), # /* Align to 32-bits. */
(uint32, 'active_count'), # /* Number of active entries. */
(uint64, 'lookup_count'), # /* Number of packets looked up in table. */
(uint64, 'matched_count'), # /* Number of packets that hit table. */
name = 'ofp_table_stats'
)
ofp_table_stats_reply = nstruct(
(ofp_table_stats[0], 'stats'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_TABLE,
classifyby = (OFPMP_TABLE,),
init = packvalue(OFPMP_TABLE, 'type'),
name = 'ofp_table_stats_reply'
)
'''
/* Body for ofp_multipart_request of type OFPMP_PORT_STATS. */
'''
ofp_port_stats_request = nstruct(
(ofp_port_no, 'port_no'), # /* OFPMP_PORT message must request statistics
# * either for a single port (specified in
# * port_no) or for all ports (if port_no ==
# * OFPP_ANY). */
(uint8[4],),
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_PORT_STATS,
classifyby = (OFPMP_PORT_STATS,),
init = packvalue(OFPMP_PORT_STATS, 'type'),
name = 'ofp_port_stats_request'
)
'''
/* Body of reply to OFPMP_PORT_STATS request. If a counter is unsupported, set
* the field to all ones. */
'''
ofp_port_stats = nstruct(
(ofp_port_no, 'port_no'),
(uint8[4],), # /* Align to 64-bits. */
(uint64, 'rx_packets'), # /* Number of received packets. */
(uint64, 'tx_packets'), # /* Number of transmitted packets. */
(uint64, 'rx_bytes'), # /* Number of received bytes. */
(uint64, 'tx_bytes'), # /* Number of transmitted bytes. */
(uint64, 'rx_dropped'), # /* Number of packets dropped by RX. */
(uint64, 'tx_dropped'), # /* Number of packets dropped by TX. */
(uint64, 'rx_errors'), # /* Number of receive errors. This is a super-set
# of more specific receive errors and should be
# greater than or equal to the sum of all
# rx_*_err values. */
(uint64, 'tx_errors'), # /* Number of transmit errors. This is a super-set
# of more specific transmit errors and should be
# greater than or equal to the sum of all
# tx_*_err values (none currently defined.) */
(uint64, 'rx_frame_err'), # /* Number of frame alignment errors. */
(uint64, 'rx_over_err'), # /* Number of packets with RX overrun. */
(uint64, 'rx_crc_err'), # /* Number of CRC errors. */
(uint64, 'collisions'), # /* Number of collisions. */
(uint32, 'duration_sec'), # /* Time port has been alive in seconds. */
(uint32, 'duration_nsec'), # /* Time port has been alive in nanoseconds beyond
# duration_sec. */
name = 'ofp_port_stats'
)
ofp_port_stats_reply = nstruct(
(ofp_port_stats[0], 'stats'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_PORT_STATS,
classifyby = (OFPMP_PORT_STATS,),
init = packvalue(OFPMP_PORT_STATS, 'type'),
name = 'ofp_port_stats_reply'
)
ofp_port_desc_reply = nstruct(
(ofp_port[0], 'ports'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_PORT_DESC,
classifyby = (OFPMP_PORT_DESC,),
init = packvalue(OFPMP_PORT_DESC, 'type'),
name = 'ofp_port_desc_reply'
)
'''
/* Body of OFPMP_GROUP request. */
'''
ofp_group_stats_request = nstruct(
(ofp_group, 'group_id'), # /* All groups if OFPG_ALL. */
(uint8[4],), # /* Align to 64 bits. */
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_GROUP,
classifyby = (OFPMP_GROUP,),
init = packvalue(OFPMP_GROUP, 'type'),
name = 'ofp_group_stats_request'
)
'''
/* Used in group stats replies. */
'''
ofp_bucket_counter = nstruct(
(uint64, 'packet_count'), # /* Number of packets processed by bucket. */
(uint64, 'byte_count'), # /* Number of bytes processed by bucket. */
name = 'ofp_bucket_counter'
)
'''
/* Body of reply to OFPMP_GROUP request. */
'''
ofp_group_stats = nstruct(
(uint16, 'length'), # /* Length of this entry. */
(uint8[2],), # /* Align to 64 bits. */
(uint32, 'group_id'), # /* Group identifier. */
(uint32, 'ref_count'), # /* Number of flows or groups that directly forward
# to this group. */
(uint8[4],), # /* Align to 64 bits. */
(uint64, 'packet_count'), # /* Number of packets processed by group. */
(uint64, 'byte_count'), # /* Number of bytes processed by group. */
(uint32, 'duration_sec'), # /* Time group has been alive in seconds. */
(uint32, 'duration_nsec'), # /* Time group has been alive in nanoseconds beyond
# duration_sec. */
(ofp_bucket_counter[0], 'bucket_stats'), # /* One counter set per bucket. */
name = 'ofp_group_stats',
size = lambda x: x.length,
prepack = packrealsize('length')
)
ofp_group_stats_reply = nstruct(
(ofp_group_stats[0], 'stats'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_GROUP,
classifyby = (OFPMP_GROUP,),
init = packvalue(OFPMP_GROUP, 'type'),
name = 'ofp_group_stats_reply'
)
'''
/* Body of reply to OFPMP_GROUP_DESC request. */
'''
ofp_group_desc = nstruct(
(uint16, 'length'), # /* Length of this entry. */
(ofp_group_type, 'type'), # /* One of OFPGT_*. */
(uint8,), # /* Pad to 64 bits. */
(uint32, 'group_id'), # /* Group identifier. */
(ofp_bucket[0], 'buckets'), # /* List of buckets - 0 or more. */
size = lambda x: x.length,
prepack = packrealsize('length'),
name = 'ofp_group_desc'
)
ofp_group_desc_reply = nstruct(
(ofp_group_desc[0], 'stats'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_GROUP_DESC,
classifyby = (OFPMP_GROUP_DESC,),
init = packvalue(OFPMP_GROUP_DESC, 'type'),
name = 'ofp_group_desc_reply'
)
'''
/* Backward compatibility with 1.3.1 - avoid breaking the API. */
'''
ofp_group_desc_stats = ofp_group_desc
'''
/* Group configuration flags */
'''
ofp_group_capabilities = enum('ofp_group_capabilities', globals(), uint32,
OFPGFC_SELECT_WEIGHT = 1 << 0, # /* Support weight for select groups */
OFPGFC_SELECT_LIVENESS = 1 << 1, # /* Support liveness for select groups */
OFPGFC_CHAINING = 1 << 2, # /* Support chaining groups */
OFPGFC_CHAINING_CHECKS = 1 << 3, # /* Check chaining for loops and delete */
)
ofp_group_type_bitwise = enum('ofp_group_type_bitwise', None, uint32, True,
**dict((k, 1<<v) for (k,v) in ofp_group_type.getDict().items()))
ofp_action_type_bitwise = enum('ofp_action_type_bitwise', None, uint32, True,
**dict((k, 1<<v) for (k,v) in ofp_action_type.getDict().items() if v < 32))
'''
/* Body of reply to OFPMP_GROUP_FEATURES request. Group features. */
'''
ofp_group_features = nstruct(
(ofp_group_type_bitwise, 'types'), # /* Bitmap of (1 << OFPGT_*) values supported. */
(ofp_group_capabilities, 'capabilities'), # /* Bitmap of OFPGFC_* capability supported. */
(uint32[4], 'max_groups'), # /* Maximum number of groups for each type. */
(ofp_action_type_bitwise[4], 'actions'), # /* Bitmaps of (1 << OFPAT_*) values supported. */
name = 'ofp_group_features'
)
ofp_group_features_reply = nstruct(
(ofp_group_features,),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_GROUP_FEATURES,
classifyby = (OFPMP_GROUP_FEATURES,),
init = packvalue(OFPMP_GROUP_FEATURES, 'type'),
name = 'ofp_group_features_reply'
)
'''
/* Body of OFPMP_METER and OFPMP_METER_CONFIG requests. */
'''
ofp_meter_multipart_request = nstruct(
(uint32, 'meter_id'), # /* Meter instance, or OFPM_ALL. */
(uint8[4],), # /* Align to 64 bits. */
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_METER or x.type == OFPMP_METER_CONFIG,
classifyby = (OFPMP_METER, OFPMP_METER_CONFIG),
init = packvalue(OFPMP_METER, 'type'),
name = 'ofp_meter_multipart_request'
)
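# Usage sketch: request statistics for every meter on the switch. The same
# body (initialized with type OFPMP_METER_CONFIG instead) asks for meter
# configuration rather than statistics.
def _example_meter_stats_request():
    req = ofp_meter_multipart_request()   # multipart type preset to OFPMP_METER
    req.meter_id = OFPM_ALL
    return req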
'''
/* Statistics for each meter band */
'''
ofp_meter_band_stats = nstruct(
(uint64, 'packet_band_count'), # /* Number of packets in band. */
(uint64, 'byte_band_count'), # /* Number of bytes in band. */
name = 'ofp_meter_band_stats'
)
'''
/* Body of reply to OFPMP_METER request. Meter statistics. */
'''
ofp_meter_stats = nstruct(
(uint32, 'meter_id'), # /* Meter instance. */
(uint16, 'len'), # /* Length in bytes of this stats. */
(uint8[6],),
(uint32, 'flow_count'), # /* Number of flows bound to meter. */
(uint64, 'packet_in_count'), # /* Number of packets in input. */
(uint64, 'byte_in_count'), # /* Number of bytes in input. */
(uint32, 'duration_sec'), # /* Time meter has been alive in seconds. */
(uint32, 'duration_nsec'), # /* Time meter has been alive in nanoseconds beyond
# duration_sec. */
(ofp_meter_band_stats[0], 'band_stats'), # /* The band_stats length is
# inferred from the length field. */
size = lambda x: x.len,
prepack = packrealsize('len'),
name = 'ofp_meter_stats'
)
ofp_meter_stats_reply = nstruct(
(ofp_meter_stats[0], 'stats'),
name = 'ofp_meter_stats_reply',
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_METER,
classifyby = (OFPMP_METER,),
init = packvalue(OFPMP_METER, 'type')
)
'''
/* Body of reply to OFPMP_METER_CONFIG request. Meter configuration. */
'''
ofp_meter_config = nstruct(
(uint16, 'length'), # /* Length of this entry. */
(ofp_meter_flags, 'flags'), # /* All OFPMF_* that apply. */
(uint32, 'meter_id'), # /* Meter instance. */
(ofp_meter_band[0], 'bands'), # /* The bands length is
# inferred from the length field. */
name = 'ofp_meter_config',
size = lambda x: x.length,
prepack = packrealsize('length')
)
ofp_meter_config_reply = \
nstruct(
(ofp_meter_config[0], 'stats'),
name = 'ofp_meter_config_reply',
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_METER_CONFIG,
classifyby = (OFPMP_METER_CONFIG,),
init = packvalue(OFPMP_METER_CONFIG, 'type'),
)
ofp_meter_band_type_bitwise = enum('ofp_meter_band_type_bitwise', None, uint32, True,
**dict((k,1<<v) for k,v in ofp_meter_band_type.getDict().items()))
'''
/* Body of reply to OFPMP_METER_FEATURES request. Meter features. */
'''
ofp_meter_features = nstruct(
(uint32, 'max_meter'), # /* Maximum number of meters. */
(ofp_meter_band_type_bitwise, 'band_types'), # /* Bitmaps of (1 << OFPMBT_*) values supported. */
(ofp_meter_flags.astype(uint32, True), 'capabilities'),# /* Bitmaps of "ofp_meter_flags". */
(uint8, 'max_bands'), # /* Maximum bands per meters */
(uint8, 'max_color'), # /* Maximum color value */
(uint8[2],),
name = 'ofp_meter_features'
)
ofp_meter_features_reply = nstruct(
(ofp_meter_features,),
name = 'ofp_meter_features_reply',
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_METER_FEATURES,
classifyby = (OFPMP_METER_FEATURES,),
init = packvalue(OFPMP_METER_FEATURES, 'type'),
)
'''
/* Body for ofp_multipart_request/reply of type OFPMP_EXPERIMENTER. */
'''
ofp_experimenter_multipart_header = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID which takes the same form
# as in struct ofp_experimenter_header. */
(uint32, 'exp_type'), # /* Experimenter defined. */
# /* Experimenter-defined arbitrary additional data. */
name = 'ofp_experimenter_multipart_header',
)
ofp_experimenter_multipart_request = nstruct(
(ofp_experimenter_multipart_header,),
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_EXPERIMENTER,
init = packvalue(OFPMP_EXPERIMENTER, 'type'),
classifyby = (OFPMP_EXPERIMENTER,),
name = 'ofp_experimenter_multipart_request',
)
ofp_experimenter_multipart_reply = nstruct(
(ofp_experimenter_multipart_header,),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_EXPERIMENTER,
init = packvalue(OFPMP_EXPERIMENTER, 'type'),
classifyby = (OFPMP_EXPERIMENTER,),
name = 'ofp_experimenter_multipart_reply',
)
'''
/* Experimenter extension. */
'''
ofp_experimenter = nstruct(
(experimenter_ids, 'experimenter'), # /* Experimenter ID:
# * - MSB 0: low-order bytes are IEEE OUI.
# * - MSB != 0: defined by ONF. */
(uint32, 'exp_type'), # /* Experimenter defined. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_EXPERIMENTER,
classifyby = (OFPT_EXPERIMENTER,),
init = packvalue(OFPT_EXPERIMENTER, 'header', 'type'),
name = 'ofp_experimenter'
)
'''
/* All ones is used to indicate all queues in a port (for stats retrieval). */
'''
ofp_queue = enum('ofp_queue', globals(), uint32,
OFPQ_ALL = 0xffffffff)
'''
/* Min rate > 1000 means not configured. */
'''
OFPQ_MIN_RATE_UNCFG = 0xffff
'''
/* Max rate > 1000 means not configured. */
'''
OFPQ_MAX_RATE_UNCFG = 0xffff
'''
/* Common description for a queue. */
'''
ofp_queue_prop_header = nstruct((ofp_queue_properties, 'property'),
(uint16, 'len'),
(uint8[4],),
name = 'ofp_queue_prop_header')
ofp_queue_prop = nstruct((ofp_queue_prop_header, 'prop_header'),
name = 'ofp_queue_prop',
size = lambda x: x.prop_header.len,
prepack = packrealsize('prop_header', 'len'),
classifier = lambda x: x.prop_header.property
)
'''
/* Min-Rate queue property description. */
'''
ofp_queue_prop_min_rate = nstruct((uint16, 'rate'),
(uint8[6],),
base = ofp_queue_prop,
criteria = lambda x: x.prop_header.property == OFPQT_MIN_RATE,
classifyby = (OFPQT_MIN_RATE,),
init = packvalue(OFPQT_MIN_RATE, 'prop_header', 'property'),
name = 'ofp_queue_prop_min_rate')
'''
/* Max-Rate queue property description. */
'''
ofp_queue_prop_max_rate = nstruct((uint16, 'rate'),
(uint8[6],),
base = ofp_queue_prop,
criteria = lambda x: x.prop_header.property == OFPQT_MAX_RATE,
classifyby = (OFPQT_MAX_RATE,),
init = packvalue(OFPQT_MAX_RATE, 'prop_header', 'property'),
name = 'ofp_queue_prop_max_rate')
'''
/* Experimenter queue property description. */
'''
ofp_queue_prop_experimenter = nstruct((experimenter_ids, 'experimenter'),
(uint8[4],),
(raw, 'data'),
name = 'ofp_queue_prop_experimenter',
base = ofp_queue_prop,
criteria = lambda x: x.prop_header.property == OFPQT_EXPERIMENTER,
classifyby = (OFPQT_EXPERIMENTER,),
init = packvalue(OFPQT_EXPERIMENTER, 'prop_header', 'property'))
'''
/* Full description for a queue. */
'''
ofp_packet_queue = nstruct(
(uint32, 'queue_id'), # /* id for the specific queue. */
(uint32, 'port'), # /* Port this queue is attached to. */
(uint16, 'len'), # /* Length in bytes of this queue desc. */
(uint8[6],), # /* 64-bit alignment. */
(ofp_queue_prop_header[0], 'properties'), # /* List of properties. */
name = 'ofp_packet_queue',
size = lambda x: x.len,
prepack = packsize('len')
)
'''
/* Query for port queue configuration. */
'''
ofp_queue_get_config_request = nstruct(
(ofp_port_no, 'port'), # /* Port to be queried. Should refer
# to a valid physical port (i.e. <= OFPP_MAX),
# or OFPP_ANY to request all configured
# queues.*/
(uint8[4],),
name = 'ofp_queue_get_config_request',
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_QUEUE_GET_CONFIG_REQUEST,
classifyby = (OFPT_QUEUE_GET_CONFIG_REQUEST,),
init = packvalue(OFPT_QUEUE_GET_CONFIG_REQUEST, 'header', 'type')
)
'''
/* Queue configuration for a given port. */
'''
ofp_queue_get_config_reply = nstruct(
(ofp_port_no, 'port'),
(uint8[4],),
(ofp_packet_queue[0], 'queues'), # /* List of configured queues. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_QUEUE_GET_CONFIG_REPLY,
classifyby = (OFPT_QUEUE_GET_CONFIG_REPLY,),
init = packvalue(OFPT_QUEUE_GET_CONFIG_REPLY, 'header', 'type'),
name = 'ofp_queue_get_config_reply'
)
'''
/* OFPAT_SET_QUEUE action struct: send packets to given queue on port. */
'''
ofp_action_set_queue = nstruct(
(uint32, 'queue_id'), # /* Queue id for the packets. */
base = ofp_action,
criteria = lambda x: x.type == OFPAT_SET_QUEUE,
classifyby = (OFPAT_SET_QUEUE,),
init = packvalue(OFPAT_SET_QUEUE, 'type'),
name = 'ofp_action_set_queue'
)
ofp_queue_stats_request = nstruct(
(ofp_port_no, 'port_no'), # /* All ports if OFPP_ANY. */
(ofp_queue, 'queue_id'), # /* All queues if OFPQ_ALL. */
base = ofp_multipart_request,
criteria = lambda x: x.type == OFPMP_QUEUE,
classifyby = (OFPMP_QUEUE,),
init = packvalue(OFPMP_QUEUE, 'type'),
name = 'ofp_queue_stats_request'
)
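# Usage sketch: queue statistics for every queue on every port, using the
# wildcard constants defined in this module.
def _example_queue_stats_request():
    req = ofp_queue_stats_request()   # multipart type preset to OFPMP_QUEUE
    req.port_no = OFPP_ANY
    req.queue_id = OFPQ_ALL
    return req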
ofp_queue_stats = nstruct(
(uint32, 'port_no'),
    (uint32, 'queue_id'),         # /* Queue id. */
(uint64, 'tx_bytes'), # /* Number of transmitted bytes. */
(uint64, 'tx_packets'), # /* Number of transmitted packets. */
(uint64, 'tx_errors'), # /* Number of packets dropped due to overrun. */
(uint32, 'duration_sec'), # /* Time queue has been alive in seconds. */
(uint32, 'duration_nsec'), # /* Time queue has been alive in nanoseconds beyond
# duration_sec. */
name = 'ofp_queue_stats'
)
ofp_queue_stats_reply = nstruct(
(ofp_queue_stats[0], 'stats'),
base = ofp_multipart_reply,
criteria = lambda x: x.type == OFPMP_QUEUE,
classifyby = (OFPMP_QUEUE,),
init = packvalue(OFPMP_QUEUE, 'type'),
name = 'ofp_queue_stats_reply'
)
'''
/* Configures the "role" of the sending controller. The default role is:
*
* - Equal (OFPCR_ROLE_EQUAL), which allows the controller access to all
* OpenFlow features. All controllers have equal responsibility.
*
* The other possible roles are a related pair:
*
* - Master (OFPCR_ROLE_MASTER) is equivalent to Equal, except that there
* may be at most one Master controller at a time: when a controller
* configures itself as Master, any existing Master is demoted to the
* Slave role.
*
* - Slave (OFPCR_ROLE_SLAVE) allows the controller read-only access to
* OpenFlow features. In particular attempts to modify the flow table
* will be rejected with an OFPBRC_EPERM error.
*
* Slave controllers do not receive OFPT_PACKET_IN or OFPT_FLOW_REMOVED
* messages, but they do receive OFPT_PORT_STATUS messages.
*/
'''
'''
/* Controller roles. */
'''
ofp_controller_role = enum('ofp_controller_role', globals(), uint32,
OFPCR_ROLE_NOCHANGE = 0, # /* Don't change current role. */
OFPCR_ROLE_EQUAL = 1, # /* Default role, full access. */
OFPCR_ROLE_MASTER = 2, # /* Full access, at most one master. */
OFPCR_ROLE_SLAVE = 3, # /* Read-only access. */
)
'''
/* Role request and reply message. */
'''
ofp_role_request = nstruct(
(ofp_controller_role, 'role'), # /* One of OFPCR_ROLE_*. */
(uint8[4],), # /* Align to 64 bits. */
(uint64, 'generation_id'), #/* Master Election Generation Id */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_ROLE_REQUEST or x.header.type == OFPT_ROLE_REPLY,
classifyby = (OFPT_ROLE_REQUEST, OFPT_ROLE_REPLY),
init = packvalue(OFPT_ROLE_REQUEST, 'header', 'type'),
name = 'ofp_role_request'
)
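# Illustrative usage sketch (an assumption, same keyword-construction
# convention as above): a controller asking to become master could build the
# message like this; generation_id must be the latest generation id it has
# seen (1 is only a placeholder), and xid assignment/sending are handled by
# the connection layer:
#
#   req = ofp_role_request(role = OFPCR_ROLE_MASTER, generation_id = 1)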
ofp_packet_in_reason_bitwise = enum('ofp_packet_in_reason_bitwise', None, uint32, True,
**dict((k, 1<<v) for k,v in ofp_packet_in_reason.getDict().items()))
ofp_port_reason_bitwise = enum('ofp_port_reason_bitwise', None, uint32, True,
**dict((k, 1<<v) for k,v in ofp_port_reason.getDict().items()))
ofp_flow_removed_reason_bitwise = enum('ofp_flow_removed_reason_bitwise', None, uint32, True,
**dict((k, 1<<v) for k,v in ofp_flow_removed_reason.getDict().items()))
'''
/* Asynchronous message configuration. */
'''
ofp_async_config = nstruct(
(ofp_packet_in_reason_bitwise[2], 'packet_in_mask'), # /* Bitmasks of OFPR_* values. */
(ofp_port_reason_bitwise[2], 'port_status_mask'), # /* Bitmasks of OFPPR_* values. */
(ofp_flow_removed_reason_bitwise[2], 'flow_removed_mask'), # /* Bitmasks of OFPRR_* values. */
base = ofp_msg,
criteria = lambda x: x.header.type == OFPT_GET_ASYNC_REPLY or x.header.type == OFPT_SET_ASYNC,
classifyby = (OFPT_GET_ASYNC_REPLY, OFPT_SET_ASYNC),
init = packvalue(OFPT_SET_ASYNC, 'header', 'type'),
name = 'ofp_async_config'
)
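# Illustrative usage sketch (an assumption, same keyword-construction
# convention as above): each mask is a two-element array indexed by role,
# [master/equal, slave]. An OFPT_SET_ASYNC asking for packet-ins only on
# table misses while master/equal, and nothing while slave, could look like:
#
#   cfg = ofp_async_config(packet_in_mask = [1 << OFPR_NO_MATCH, 0],
#                          port_status_mask = [0, 0],
#                          flow_removed_mask = [0, 0])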
ofp_error_types = dict(ofp_error_types)
ofp_error_types.update({
OFPET_BAD_INSTRUCTION : ofp_error_typedef(OFPET_BAD_INSTRUCTION, ofp_bad_instruction_code, OFP_VERSION, ofp_error_type),
OFPET_BAD_MATCH : ofp_error_typedef(OFPET_BAD_MATCH, ofp_bad_match_code, OFP_VERSION, ofp_error_type),
OFPET_FLOW_MOD_FAILED : ofp_error_typedef(OFPET_FLOW_MOD_FAILED, ofp_flow_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_GROUP_MOD_FAILED : ofp_error_typedef(OFPET_GROUP_MOD_FAILED, ofp_group_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_PORT_MOD_FAILED : ofp_error_typedef(OFPET_PORT_MOD_FAILED, ofp_port_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_TABLE_MOD_FAILED : ofp_error_typedef(OFPET_TABLE_MOD_FAILED, ofp_table_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_QUEUE_OP_FAILED : ofp_error_typedef(OFPET_QUEUE_OP_FAILED, ofp_queue_op_failed_code, OFP_VERSION, ofp_error_type),
OFPET_SWITCH_CONFIG_FAILED : ofp_error_typedef(OFPET_SWITCH_CONFIG_FAILED, ofp_switch_config_failed_code, OFP_VERSION, ofp_error_type),
OFPET_ROLE_REQUEST_FAILED : ofp_error_typedef(OFPET_ROLE_REQUEST_FAILED, ofp_role_request_failed_code, OFP_VERSION, ofp_error_type),
OFPET_METER_MOD_FAILED : ofp_error_typedef(OFPET_METER_MOD_FAILED, ofp_meter_mod_failed_code, OFP_VERSION, ofp_error_type),
OFPET_TABLE_FEATURES_FAILED : ofp_error_typedef(OFPET_TABLE_FEATURES_FAILED, ofp_table_features_failed_code, OFP_VERSION, ofp_error_type)
})
ofp_vendor_vendorid = 'experimenter'
ofp_vendor_subtype = 'exp_type'
ofp_action_vendor_vendorid = 'experimenter'
ofp_action_vendor_subtype = 'exp_type'
ofp_stats_vendor_vendorid = 'experimenter'
ofp_stats_vendor_subtype = 'exp_type'
from .nicira_ext import *
'''
/* Header for Nicira vendor requests and replies. */
'''
nicira_header = nstruct(
base = ofp_experimenter,
criteria = lambda x: x.experimenter == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'experimenter'),
name = 'nicira_header',
classifier = lambda x: x.exp_type,
extend = {'exp_type': nxt_subtype}
)
'''
/* Header for Nicira-defined actions. */
'''
nx_action = nstruct(
(nx_action_subtype, 'exp_type'),
base = ofp_action_experimenter,
criteria = lambda x: x.experimenter == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'experimenter'),
name = 'nx_action',
classifier = lambda x: x.exp_type
)
nx_stats_request = nstruct(
base = ofp_experimenter_multipart_request,
criteria = lambda x: x.experimenter == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'experimenter'),
name = 'nx_stats_request',
classifier = lambda x: x.exp_type,
extend = {'exp_type': nx_stats_subtype}
)
nx_stats_reply = nstruct(
base = ofp_experimenter_multipart_reply,
criteria = lambda x: x.experimenter == NX_VENDOR_ID,
init = packvalue(NX_VENDOR_ID, 'experimenter'),
name = 'nx_stats_reply',
classifier = lambda x: x.exp_type,
extend = {'exp_type': nx_stats_subtype}
)
create_extension(globals(), nicira_header, nx_action, nx_stats_request, nx_stats_reply, ofp_vendor_subtype, ofp_action_vendor_subtype, ofp_stats_vendor_subtype)
| hubo1016/vlcp | vlcp/protocol/openflow/defs/openflow13.py | Python | apache-2.0 | 130,995 | 0.031612 |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="A"),
PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Series / DataFrame columns of these types (so
# we get consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
        # abstraction for code reuse.
# At the moment, we've judged that allowing this test to fail is more
        # practical than overriding Series._values to special case
# Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
pytest.param(
pd.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
pd.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
"array, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
(pd.core.arrays.integer_array([0, np.nan]), "_data"),
(IntervalArray.from_breaks([0, 1]), "_left"),
(SparseArray([0, 1]), "_sparse_values"),
(DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
# tz-aware Datetime
(
DatetimeArray(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
"_data",
),
],
)
def test_array(array, attr, index_or_series):
box = index_or_series
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip(f"No index type for {array.dtype}")
result = box(array, copy=False).array
if attr:
array = getattr(array, attr)
result = getattr(result, attr)
assert result is array
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
msg = "MultiIndex has no single backing array"
with pytest.raises(ValueError, match=msg):
idx.array
@pytest.mark.parametrize(
"array, expected",
[
(np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
(pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
(
pd.core.arrays.period_array(["2000", "2001"], freq="D"),
np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
),
(
pd.core.arrays.integer_array([0, np.nan]),
np.array([0, pd.NA], dtype=object),
),
(
IntervalArray.from_breaks([0, 1, 2]),
np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
),
(SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
# tz-naive datetime
(
DatetimeArray(np.array(["2000", "2001"], dtype="M8[ns]")),
np.array(["2000", "2001"], dtype="M8[ns]"),
),
        # tz-aware stays tz-aware
(
DatetimeArray(
np.array(
["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
np.array(
[
pd.Timestamp("2000-01-01", tz="US/Central"),
pd.Timestamp("2000-01-02", tz="US/Central"),
]
),
),
# Timedelta
(
TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
np.array([0, 3600000000000], dtype="m8[ns]"),
),
],
)
def test_to_numpy(array, expected, index_or_series):
box = index_or_series
thing = box(array)
if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
pytest.skip(f"No index type for {array.dtype}")
result = thing.to_numpy()
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
"arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)]
)
def test_to_numpy_copy(arr, as_series):
obj = pd.Index(arr, copy=False)
if as_series:
obj = pd.Series(obj.values, copy=False)
# no copy by default
result = obj.to_numpy()
assert np.shares_memory(arr, result) is True
result = obj.to_numpy(copy=False)
assert np.shares_memory(arr, result) is True
# copy=True
result = obj.to_numpy(copy=True)
assert np.shares_memory(arr, result) is False
@pytest.mark.parametrize("as_series", [True, False])
def test_to_numpy_dtype(as_series):
tz = "US/Eastern"
obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
if as_series:
obj = pd.Series(obj)
# preserve tz by default
result = obj.to_numpy()
expected = np.array(
[pd.Timestamp("2000", tz=tz), pd.Timestamp("2001", tz=tz)], dtype=object
)
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="M8[ns]")
expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"values, dtype, na_value, expected",
[
([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]),
(
[pd.Timestamp("2000"), pd.Timestamp("2000"), pd.NaT],
None,
pd.Timestamp("2000"),
[np.datetime64("2000-01-01T00:00:00.000000000")] * 3,
),
],
)
def test_to_numpy_na_value_numpy_dtype(
index_or_series, values, dtype, na_value, expected
):
obj = index_or_series(values)
result = obj.to_numpy(dtype=dtype, na_value=na_value)
expected = np.array(expected)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_kwargs_raises():
# numpy
s = pd.Series([1, 2, 3])
msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
# extension
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
@pytest.mark.parametrize(
"data",
[
{"a": [1, 2, 3], "b": [1, 2, None]},
{"a": np.array([1, 2, 3]), "b": np.array([1, 2, np.nan])},
{"a": pd.array([1, 2, 3]), "b": pd.array([1, 2, None])},
],
)
@pytest.mark.parametrize("dtype, na_value", [(float, np.nan), (object, None)])
def test_to_numpy_dataframe_na_value(data, dtype, na_value):
# https://github.com/pandas-dev/pandas/issues/33820
df = pd.DataFrame(data)
result = df.to_numpy(dtype=dtype, na_value=na_value)
expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"data, expected",
[
(
{"a": pd.array([1, 2, None])},
np.array([[1.0], [2.0], [np.nan]], dtype=float),
),
(
{"a": [1, 2, 3], "b": [1, 2, 3]},
np.array([[1, 1], [2, 2], [3, 3]], dtype=float),
),
],
)
def test_to_numpy_dataframe_single_block(data, expected):
# https://github.com/pandas-dev/pandas/issues/33820
df = pd.DataFrame(data)
result = df.to_numpy(dtype=float, na_value=np.nan)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dataframe_single_block_no_mutate():
# https://github.com/pandas-dev/pandas/issues/33820
result = pd.DataFrame(np.array([1.0, 2.0, np.nan]))
expected = pd.DataFrame(np.array([1.0, 2.0, np.nan]))
result.to_numpy(na_value=0.0)
tm.assert_frame_equal(result, expected)
| TomAugspurger/pandas | pandas/tests/base/test_conversion.py | Python | bsd-3-clause | 14,519 | 0.000551 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the storage file CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import storage_file
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class StorageFileArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage file CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [STORAGE_FILE]
Test argument parser.
positional arguments:
STORAGE_FILE Path to a storage file.
"""
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_file.StorageFileArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
test_tool = tools.CLITool()
options = cli_test_lib.TestOptions()
options.storage_file = self._GetTestFilePath(['test.plaso'])
storage_file.StorageFileArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._storage_file_path, options.storage_file)
with self.assertRaises(errors.BadConfigObject):
storage_file.StorageFileArgumentsHelper.ParseOptions(options, None)
if __name__ == '__main__':
unittest.main()
| Onager/plaso | tests/cli/helpers/storage_file.py | Python | apache-2.0 | 1,615 | 0.004334 |
import errno
import os
import sys
from contextlib import contextmanager
@contextmanager
def open_with_error(filename: str, mode: str = "r", encoding: str = "utf-8"):
try:
f = open(filename, mode=mode, encoding=encoding)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
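# Illustrative usage (a sketch; "settings.ini" is a hypothetical file name).
# The context manager yields a (file, error) pair, so callers can branch on
# the error instead of wrapping the whole block in try/except:
#
#   with open_with_error("settings.ini") as (f, err):
#       if err:
#           print("could not open:", err)
#       else:
#           print(f.read())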
def get_next_file_path(folder: str, base_filename: str):
"""
Python version of this C# code: http://stackoverflow.com/a/1078898
    Given a base filename, returns a unique filename. Checks whether the given file exists and, if it does,
    tries to find the next available filename by appending numbers to the base filename until an unused
    filename is found.
:param folder: Full path to folder. If last path separator is omitted, then the function adds it. Ex:
``C:\\users\\bob\\images\\``
``C:\\users\\bob\\images`` (will add the backslash)
:param base_filename: The base filename of the file. Ex:
``image.png``
:return: The next available filename (Ex: image_2.png).
"""
pattern = "{filename}_{nb}{ext}"
if not folder.endswith(os.path.sep):
folder += os.path.sep
full_path = folder + base_filename
if not os.path.isfile(full_path):
return full_path
filename, file_extension = os.path.splitext(base_filename)
min_nbr, max_nbr = 1, 2
while os.path.isfile(
os.path.join(folder, pattern.format(filename=filename, nb=str(max_nbr), ext=file_extension))):
min_nbr = max_nbr
max_nbr *= 2
while max_nbr != min_nbr + 1:
pivot = int((max_nbr + min_nbr) / 2)
if os.path.isfile(
os.path.join(folder, pattern.format(filename=filename, nb=str(pivot), ext=file_extension))):
min_nbr = pivot
else:
max_nbr = pivot
return os.path.join(folder, pattern.format(filename=filename, nb=str(max_nbr), ext=file_extension))
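# Illustrative usage (hypothetical paths): if image.png, image_1.png and
# image_2.png already exist in /home/bob/images, the call below returns
# "/home/bob/images/image_3.png"; if image.png does not exist yet, the
# unnumbered path itself is returned.
#
#   next_path = get_next_file_path("/home/bob/images", "image.png")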
def make_sure_path_exists(path: str) -> None:
"""
Makes sure that the path exists. If it does not exist
creates the path (all directories and sub-directories in the given path).
"""
if sys.version_info[:3] >= (3, 4, 1):
os.makedirs(path, exist_ok=True)
else:
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
| raphaeldore/analyzr | analyzr/utils/file.py | Python | mit | 2,428 | 0.002471 |
import sys,os
import numpy as np
#os.environ["EPICS_CA_AUTO_ADDR_LIST"] = "NO"
#os.environ["EPICS_CA_ADDR_LIST"] = "192.168.82.10"
#os.environ["EPICS_CA_MAX_ARRAY_BYTES"] = "10000000000"
import velaINJMagnetControl as VIMC
a = VIMC.velaINJMagnetController(True,False)
print( np.array(a.getQuadNames()))
| mothyjohn/VELA-CLARA-Controllers | General/enums/bin/Release/test.py | Python | gpl-3.0 | 307 | 0.022801 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.video import BaseVideo
class YoutubeTest(BackendTest):
BACKEND = 'youtube'
def test_search(self):
l = list(self.backend.search_videos('lol'))
self.assertTrue(len(l) > 0)
v = l[0]
self.backend.fillobj(v, ('url',))
self.assertTrue(v.url and v.url.startswith('http://'), 'URL for video "%s" not found: %s' % (v.id, v.url))
assert self.backend.get_video(v.shorturl)
self.backend.browser.openurl(v.url)
def test_latest(self):
l = list(self.backend.iter_resources([BaseVideo], [u'latest']))
assert len(l) > 0
| franek/weboob | modules/youtube/test.py | Python | agpl-3.0 | 1,394 | 0.002152 |
from helper import unittest, PillowTestCase, py3
from PIL import Image
class TestLibPack(PillowTestCase):
def pack(self):
pass # not yet
def test_pack(self):
def pack(mode, rawmode):
if len(mode) == 1:
im = Image.new(mode, (1, 1), 1)
else:
im = Image.new(mode, (1, 1), (1, 2, 3, 4)[:len(mode)])
if py3:
return list(im.tobytes("raw", rawmode))
else:
return [ord(c) for c in im.tobytes("raw", rawmode)]
order = 1 if Image._ENDIAN == '<' else -1
self.assertEqual(pack("1", "1"), [128])
self.assertEqual(pack("1", "1;I"), [0])
self.assertEqual(pack("1", "1;R"), [1])
self.assertEqual(pack("1", "1;IR"), [0])
self.assertEqual(pack("L", "L"), [1])
self.assertEqual(pack("I", "I"), [1, 0, 0, 0][::order])
self.assertEqual(pack("F", "F"), [0, 0, 128, 63][::order])
self.assertEqual(pack("LA", "LA"), [1, 2])
self.assertEqual(pack("RGB", "RGB"), [1, 2, 3])
self.assertEqual(pack("RGB", "RGB;L"), [1, 2, 3])
self.assertEqual(pack("RGB", "BGR"), [3, 2, 1])
self.assertEqual(pack("RGB", "RGBX"), [1, 2, 3, 255]) # 255?
self.assertEqual(pack("RGB", "BGRX"), [3, 2, 1, 0])
self.assertEqual(pack("RGB", "XRGB"), [0, 1, 2, 3])
self.assertEqual(pack("RGB", "XBGR"), [0, 3, 2, 1])
self.assertEqual(pack("RGBX", "RGBX"), [1, 2, 3, 4]) # 4->255?
self.assertEqual(pack("RGBA", "RGBA"), [1, 2, 3, 4])
self.assertEqual(pack("CMYK", "CMYK"), [1, 2, 3, 4])
self.assertEqual(pack("YCbCr", "YCbCr"), [1, 2, 3])
def test_unpack(self):
def unpack(mode, rawmode, bytes_):
im = None
if py3:
data = bytes(range(1, bytes_+1))
else:
data = ''.join(chr(i) for i in range(1, bytes_+1))
im = Image.frombytes(mode, (1, 1), data, "raw", rawmode, 0, 1)
return im.getpixel((0, 0))
def unpack_1(mode, rawmode, value):
assert mode == "1"
im = None
if py3:
im = Image.frombytes(
mode, (8, 1), bytes([value]), "raw", rawmode, 0, 1)
else:
im = Image.frombytes(
mode, (8, 1), chr(value), "raw", rawmode, 0, 1)
return tuple(im.getdata())
X = 255
self.assertEqual(unpack_1("1", "1", 1), (0, 0, 0, 0, 0, 0, 0, X))
self.assertEqual(unpack_1("1", "1;I", 1), (X, X, X, X, X, X, X, 0))
self.assertEqual(unpack_1("1", "1;R", 1), (X, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(unpack_1("1", "1;IR", 1), (0, X, X, X, X, X, X, X))
self.assertEqual(unpack_1("1", "1", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack_1("1", "1;I", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;R", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;IR", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack("L", "L;2", 1), 0)
self.assertEqual(unpack("L", "L;4", 1), 0)
self.assertEqual(unpack("L", "L", 1), 1)
self.assertEqual(unpack("L", "L;I", 1), 254)
self.assertEqual(unpack("L", "L;R", 1), 128)
self.assertEqual(unpack("L", "L;16", 2), 2) # little endian
self.assertEqual(unpack("L", "L;16B", 2), 1) # big endian
self.assertEqual(unpack("LA", "LA", 2), (1, 2))
self.assertEqual(unpack("LA", "LA;L", 2), (1, 2))
self.assertEqual(unpack("RGB", "RGB", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;L", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;R", 3), (128, 64, 192))
self.assertEqual(unpack("RGB", "RGB;16B", 6), (1, 3, 5)) # ?
self.assertEqual(unpack("RGB", "BGR", 3), (3, 2, 1))
self.assertEqual(unpack("RGB", "RGB;15", 2), (8, 131, 0))
self.assertEqual(unpack("RGB", "BGR;15", 2), (0, 131, 8))
self.assertEqual(unpack("RGB", "RGB;16", 2), (8, 64, 0))
self.assertEqual(unpack("RGB", "BGR;16", 2), (0, 64, 8))
self.assertEqual(unpack("RGB", "RGB;4B", 2), (17, 0, 34))
self.assertEqual(unpack("RGB", "RGBX", 4), (1, 2, 3))
self.assertEqual(unpack("RGB", "BGRX", 4), (3, 2, 1))
self.assertEqual(unpack("RGB", "XRGB", 4), (2, 3, 4))
self.assertEqual(unpack("RGB", "XBGR", 4), (4, 3, 2))
self.assertEqual(unpack("RGBA", "RGBA", 4), (1, 2, 3, 4))
self.assertEqual(unpack("RGBA", "BGRA", 4), (3, 2, 1, 4))
self.assertEqual(unpack("RGBA", "ARGB", 4), (2, 3, 4, 1))
self.assertEqual(unpack("RGBA", "ABGR", 4), (4, 3, 2, 1))
self.assertEqual(unpack("RGBA", "RGBA;15", 2), (8, 131, 0, 0))
self.assertEqual(unpack("RGBA", "BGRA;15", 2), (0, 131, 8, 0))
self.assertEqual(unpack("RGBA", "RGBA;4B", 2), (17, 0, 34, 0))
self.assertEqual(unpack("RGBX", "RGBX", 4), (1, 2, 3, 4)) # 4->255?
self.assertEqual(unpack("RGBX", "BGRX", 4), (3, 2, 1, 255))
self.assertEqual(unpack("RGBX", "XRGB", 4), (2, 3, 4, 255))
self.assertEqual(unpack("RGBX", "XBGR", 4), (4, 3, 2, 255))
self.assertEqual(unpack("RGBX", "RGB;15", 2), (8, 131, 0, 255))
self.assertEqual(unpack("RGBX", "BGR;15", 2), (0, 131, 8, 255))
self.assertEqual(unpack("RGBX", "RGB;4B", 2), (17, 0, 34, 255))
self.assertEqual(unpack("CMYK", "CMYK", 4), (1, 2, 3, 4))
self.assertEqual(unpack("CMYK", "CMYK;I", 4), (254, 253, 252, 251))
self.assertRaises(ValueError, lambda: unpack("L", "L", 0))
self.assertRaises(ValueError, lambda: unpack("RGB", "RGB", 2))
self.assertRaises(ValueError, lambda: unpack("CMYK", "CMYK", 2))
if __name__ == '__main__':
unittest.main()
# End of file
| 1upon0/rfid-auth-system | GUI/printer/Pillow-2.7.0/Tests/test_lib_pack.py | Python | apache-2.0 | 5,896 | 0 |
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2013 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Document
Provides a ly.document.Document api for a QTextDocument (or: more specifically
a Frescobaldi document.Document).
This can be used to perform operations from the ly module on a loaded
Frescobaldi document.
You don't need to save a Document instance. Just create it and use it, then
discard it.
"""
from PyQt5.QtGui import QTextCursor
import ly.document
import tokeniter
import highlighter
def cursor(cursor, select_all=False):
"""Return a Cursor for the specified QTextCursor.
The ly Cursor is instantiated with a Document proxying for the
original cursors document.
So you can call all operations in the ly module and they will work on a
Frescobaldi document (which is a subclass of QTextDocument).
If select_all is True, the ly Cursor selects the whole document if the
original cursor has no selection.
"""
if not select_all or cursor.hasSelection():
start, end = cursor.selectionStart(), cursor.selectionEnd()
else:
start, end = 0, None
return Cursor(Document(cursor.document()), start, end)
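# Illustrative usage sketch (hypothetical `view` widget): wrap the current
# QTextCursor, run any ly.* operation that accepts a ly.document.Cursor on
# it, and convert back when a QTextCursor is needed again:
#
#   c = cursor(view.textCursor(), select_all=True)
#   # ... pass `c` to an operation from the ly module ...
#   qc = c.cursor()   # a QTextCursor covering the same selection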
class Cursor(ly.document.Cursor):
"""A ly.document.Cursor with an extra cursor() method."""
def cursor(self):
"""Return a QTextCursor with the same selection."""
c = QTextCursor(self.document.document)
c.movePosition(QTextCursor.End) if self.end is None else c.setPosition(self.end)
c.setPosition(self.start, QTextCursor.KeepAnchor)
return c
class Document(ly.document.DocumentBase):
"""Document proxies a loaded Frescobaldi document (QTextDocument).
This is used to let the tools in the ly module operate on Frescobaldi
documents.
    Creating a Document is very fast; you do not need to save it. When
    applying the changes, Document starts an edit block, so that the
    operations appear as one undo-item.
It is recommended to not nest calls to QTextCursor.beginEditBlock(), as
the highlighter is not called to update the tokens until the last
endEditBlock() is called.
Therefore Document provides a simple mechanism for combining several
change operations via the combine_undo attribute.
If combine_undo is None (the default), the first time changes are applied
QTextCursor.beginEditBlock() will be called, but subsequent times
QTextCursor.joinPreviousEditBlock() will be used. So the highlighter
updates the tokens between the operations, but they will appear as one
undo-item.
If you want to combine the very first operation already with an earlier
change, set combine_undo to True before the changes are applied (e.g.
before entering or exiting the context).
If you do not want to combine operations into a single undo-item at all,
set combine_undo to False.
(Of course you can nest calls to QTextCursor.beginEditBlock(), but in
that case the tokens will not be updated between your operations. If
your operations do not depend on the tokens, it is no problem
whatsoever. The tokens *are* updated after the last call to
QTextCursor.endEditBlock().)
"""
def __init__(self, document):
self._d = document
super(Document, self).__init__()
self.combine_undo = None
def __len__(self):
"""Return the number of blocks"""
return self._d.blockCount()
def __getitem__(self, index):
"""Return the block at the specified index."""
return self._d.findBlockByNumber(index)
@property
def document(self):
"""Return the QTextDocument we were instantiated with."""
return self._d
@property
def filename(self):
"""Return the document's local filename, if any."""
return self.document.url().toLocalFile()
def plaintext(self):
"""The document contents as a plain text string."""
return self._d.toPlainText()
def setplaintext(self, text):
"""Sets the document contents to the text string."""
self._d.setPlainText(text)
def size(self):
"""Return the number of characters in the document."""
return self._d.characterCount()
def block(self, position):
"""Return the text block at the specified character position.
The text block itself has no methods, but it can be used as an
argument to other methods of this class.
(Blocks do have to support the '==' operator.)
"""
return self._d.findBlock(position)
def index(self, block):
"""Return the linenumber of the block (starting with 0)."""
return block.blockNumber()
def position(self, block):
"""Return the position of the specified block."""
return block.position()
def text(self, block):
"""Return the text of the specified block."""
return block.text()
def next_block(self, block):
"""Return the next block, which may be invalid."""
return block.next()
def previous_block(self, block):
"""Return the previous block, which may be invalid."""
return block.previous()
def isvalid(self, block):
"""Return True if the block is a valid block."""
return block.isValid()
def apply_changes(self):
"""Apply the changes and update the tokens."""
c = QTextCursor(self._d)
# record a sensible position for undo
c.setPosition(self._changes_list[-1][0])
c.joinPreviousEditBlock() if self.combine_undo else c.beginEditBlock()
try:
for start, end, text in self._changes_list:
c.movePosition(QTextCursor.End) if end is None else c.setPosition(end)
c.setPosition(start, QTextCursor.KeepAnchor)
c.insertText(text)
finally:
c.endEditBlock()
if self.combine_undo is None:
self.combine_undo = True
def tokens(self, block):
"""Return the tuple of tokens of the specified block."""
return tokeniter.tokens(block)
def initial_state(self):
"""Return the state at the beginning of the document."""
return highlighter.highlighter(self._d).initialState()
def state(self, block):
"""Return the state at the start of the specified block."""
return tokeniter.state(block)
def state_end(self, block):
"""Return the state at the end of the specified block."""
return tokeniter.state_end(block)
class Runner(ly.document.Runner):
"""A Runner that adds a cursor() method, returning a QTextCursor."""
def cursor(self, start=0, end=None):
"""Returns a QTextCursor for the last token.
If start is given the cursor will start at position start in the token
(from the beginning of the token). Start defaults to 0.
If end is given, the cursor will end at that position in the token (from
the beginning of the token). End defaults to the length of the token.
"""
if end is None:
end = len(self.token())
c = QTextCursor(self.document.document)
c.setPosition(self.position() + start)
c.setPosition(self.position() + end, QTextCursor.KeepAnchor)
return c
class Source(ly.document.Source):
"""A Source that adds a cursor() method, returning a QTextCursor."""
def cursor(self, token, start=0, end=None):
"""Returns a QTextCursor for the specified token.
If start is given the cursor will start at position start in the token
(from the beginning of the token). Start defaults to 0.
If end is given, the cursor will end at that position in the token (from
the beginning of the token). End defaults to the length of the token.
"""
if end is None:
end = len(token)
c = QTextCursor(self.document.document)
pos = self.position(token)
c.setPosition(pos + start)
c.setPosition(pos + end, QTextCursor.KeepAnchor)
return c
| brownian/frescobaldi | frescobaldi_app/lydocument.py | Python | gpl-2.0 | 8,870 | 0.000676 |
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, signal, traceback
import thread
try:
import termios
except ImportError:
termios = None
RUN_RELOADER = True
_mtimes = {}
_win = (sys.platform == "win32")
_error_files = []
def code_changed():
global _mtimes, _win
filenames = []
for m in list(sys.modules.values()):
try:
filenames.append(m.__file__)
except AttributeError:
pass
for filename in filenames + _error_files:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if not os.path.exists(filename):
continue # File might be in an egg, so it can't be reloaded.
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
try:
del _error_files[_error_files.index(filename)]
except ValueError:
pass
return True
return False
def check_errors(fn):
def wrapper(*args, **kwargs):
try:
fn(*args, **kwargs)
except (ImportError, IndentationError, NameError, SyntaxError,
TypeError, AttributeError):
et, ev, tb = sys.exc_info()
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
ensure_echo_on()
while RUN_RELOADER:
if code_changed():
sys.exit(3) # force reload
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
exit_code = restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
wrapped_main_func = check_errors(main_func)
reloader(wrapped_main_func, args, kwargs)
| yangdw/PyRepo | src/annotation/haven/haven/autoreload.py | Python | mit | 5,900 | 0.002712 |
"""
Test suite for module.
Holds constants and methods shared among multiple tests.
See submodules for individual tests.
"""
import os
import shutil
PARAM_LIST = {
'pre_min': 100,
'pre_max': 101,
'Pre': 10,
'pim_min': -101,
'pim_max': -100,
'Pim': 14,
'T': 16,
'I': 20,
}
OUTPUT_DIR = "test-output"
def purge_output_dir(path=OUTPUT_DIR):
delete_output_dir(path=path)
if os.path.exists(path):
raise Exception("Failed to removed test output folder")
os.makedirs(path)
def delete_output_dir(path=OUTPUT_DIR):
if os.path.isfile(path):
os.remove(path)
if os.path.isdir(path):
shutil.rmtree(path)
| kidmose/python-course | mandelbrot/tests/__init__.py | Python | mit | 674 | 0.004451 |
from .base import Base
class Containing(Base):
_description = 'containing: {}'
def _check(self, value):
# This will check list like objects
for v in self.args:
if v not in value:
return False
# This will check dictionary like objects
for k, v in self.kwargs.items():
if k not in value or not value[k] == v:
return False
return True
| toddsifleet/equals | equals/constraints/containing.py | Python | mit | 440 | 0 |
""" Test the change_enrollment command line script."""
import ddt
import unittest
from uuid import uuid4
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from enrollment.api import get_enrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EnrollManagementCommandTest(SharedModuleStoreTestCase):
"""
Test the enroll_user_in_course management command
"""
@classmethod
def setUpClass(cls):
super(EnrollManagementCommandTest, cls).setUpClass()
cls.course = CourseFactory.create(org='fooX', number='007')
def setUp(self):
super(EnrollManagementCommandTest, self).setUp()
self.course_id = unicode(self.course.id)
self.username = 'ralph' + uuid4().hex
self.user_email = self.username + '@example.com'
UserFactory(username=self.username, email=self.user_email)
def test_enroll_user(self):
command_args = [
'--course', self.course_id,
'--email', self.user_email,
]
call_command(
'enroll_user_in_course',
*command_args
)
user_enroll = get_enrollment(self.username, self.course_id)
self.assertTrue(user_enroll['is_active'])
def test_enroll_user_twice(self):
"""
Ensures the command is idempotent.
"""
command_args = [
'--course', self.course_id,
'--email', self.user_email,
]
for _ in range(2):
call_command(
'enroll_user_in_course',
*command_args
)
# Second run does not impact the first run (i.e., the
# user is still enrolled, no exception was raised, etc)
user_enroll = get_enrollment(self.username, self.course_id)
self.assertTrue(user_enroll['is_active'])
@ddt.data(['--email', 'foo'], ['--course', 'bar'], ['--bad-param', 'baz'])
def test_not_enough_args(self, arg):
"""
When the command is missing certain arguments, it should
raise an exception
"""
command_args = arg
with self.assertRaises(CommandError):
call_command(
'enroll_user_in_course',
*command_args
)
| defance/edx-platform | common/djangoapps/enrollment/management/tests/test_enroll_user_in_course.py | Python | agpl-3.0 | 2,563 | 0.00039 |
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse_lazy
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'pipeye.views.home', name='home'),
url(r'^watches/', include('pipeye.watches.urls')),
url(r'^packages/', include('pipeye.packages.urls')),
url(r'^accounts/', include('pipeye.accounts.urls')),
# github login
url(r'^login/$', 'social_auth.views.auth',
{'backend': 'github'}, name='login'),
url(r'^login/complete/(?P<backend>\w+)/$',
'social_auth.views.complete', name='login_complete'),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'next_page': reverse_lazy('home')}, name='logout'),
# admin
url(r'^admin/', include(admin.site.urls)),
)
| oinopion/pipeye | pipeye/urls.py | Python | bsd-2-clause | 820 | 0.004878 |
#!/usr/bin/env python3
# encoding: utf-8
"""
main.py
The entry point for the book reader application.
"""
__version_info__ = (0, 0, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "c.guenther@mac.com"
import time
import sqlite3
import pdb
import signal
import sys, os
import rfid
import config
import RPi.GPIO as GPIO
from player import Player
from status_light import StatusLight
from threading import Thread
class BookReader(object):
"""The main class that controls the player, the GPIO pins and the RFID reader"""
def __init__(self):
"""Initialize all the things"""
self.rfid_reader = rfid.Reader(**config.serial)
# setup signal handlers. SIGINT for KeyboardInterrupt
# and SIGTERM for when running from supervisord
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
self.status_light = StatusLight(config.status_light_pin)
thread = Thread(target=self.status_light.start)
thread.start()
self.setup_db()
self.player = Player(config.mpd_conn, self.status_light)
self.setup_gpio()
def setup_db(self):
"""Setup a connection to the SQLite db"""
self.db_conn = sqlite3.connect(config.db_file)
self.db_cursor = self.db_conn.cursor()
def setup_gpio(self):
"""Setup all GPIO pins"""
GPIO.setmode(GPIO.BCM)
# input pins for buttons
for pin in config.gpio_pins:
GPIO.setup(pin['pin_id'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(pin['pin_id'], GPIO.FALLING, callback=getattr(self.player, pin['callback']), bouncetime=pin['bounce_time'])
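    # Illustrative shape of config.gpio_pins assumed by the loop above
    # (hypothetical pin numbers and callback names; the real values live in
    # config.py and the callbacks must be methods of Player):
    #
    #   gpio_pins = [
    #       {'pin_id': 9,  'callback': 'rewind',       'bounce_time': 1000},
    #       {'pin_id': 11, 'callback': 'toggle_pause', 'bounce_time': 1000},
    #   ]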
def signal_handler(self, signal, frame):
"""When quiting, stop playback, close the player and release GPIO pins"""
self.player.close()
self.status_light.exit()
GPIO.cleanup()
sys.exit(0)
def loop(self):
"""The main event loop. This is where we look for new RFID cards on the RFID reader. If one is
        present and different from the book that's currently playing, then:
        1. Stop playback of the current book if one is playing
        2. Start playing the new book
"""
while True:
if self.player.is_playing():
self.on_playing()
elif self.player.finished_book():
# when at the end of a book, delete its progress from the db
# so we can listen to it again
self.db_cursor.execute(
'DELETE FROM progress WHERE book_id = %d' % self.player.book.book_id)
self.db_conn.commit()
self.player.book.reset()
rfid_card = self.rfid_reader.read()
if not rfid_card:
continue
book_id = rfid_card.get_id()
if book_id and book_id != self.player.book.book_id: # a change in book id
progress = self.db_cursor.execute(
'SELECT * FROM progress WHERE book_id = "%s"' % book_id).fetchone()
self.player.play(book_id, progress)
def on_playing(self):
"""Executed for each loop execution. Here we update self.player.book with the latest known position
        and save the progress to the db"""
status = self.player.get_status()
self.player.book.elapsed = float(status['elapsed'])
self.player.book.part = int(status['song']) + 1
#print "%s second of part %s" % (self.player.book.elapsed, self.player.book.part)
self.db_cursor.execute(
'INSERT OR REPLACE INTO progress (book_id, part, elapsed) VALUES (%s, %d, %f)' %\
(self.player.book.book_id, self.player.book.part, self.player.book.elapsed))
self.db_conn.commit()
if __name__ == '__main__':
reader = BookReader()
reader.loop()
| siliconchris1973/fairytale | RASPI-stuff/python-codeline/fairytale/main.py | Python | apache-2.0 | 3,954 | 0.006576 |
#!/usr/bin/python
import cairo
import gtk
import copy
def abs(x):
if x < 0:
return -x
else:
return x
class object:
def __init__(self, name, x, y, w, h):
self.name = name
self.x = x
self.y = y
self.w = w
self.h = h
        # hugging vector: snap offset applied while dragging near edges/grid
self.dx = 0
self.dy = 0
# 0 = normal ; 1 = active ; 2 = selected
self.status = 0
self.moving = False
self.offsetx = 0
self.offsety = 0
self.anchor = 0
self.dirty = False
def get_extents(self):
return self.x, self.y, self.w, self.h
def get_extents_after_hug(self):
        # anchor layout: which handle of the object was grabbed, read as a
        # 3x3 grid over the object (0 = move the whole object):
        #   1 7 3
        #   5 0 6
        #   2 8 4
if self.anchor == 1:
return self.x + self.dx, self.y + self.dy, self.w - self.dx, self.h - self.dy
elif self.anchor == 2:
return self.x + self.dx, self.y, self.w - self.dx, self.h + self.dy
elif self.anchor == 3:
return self.x, self.y + self.dy, self.w + self.dx, self.h - self.dy
elif self.anchor == 4:
return self.x, self.y, self.w + self.dx, self.h + self.dy
elif self.anchor == 5:
return self.x + self.dx, self.y, self.w - self.dx, self.h
elif self.anchor == 6:
return self.x, self.y, self.w + self.dx, self.h
elif self.anchor == 7:
return self.x, self.y + self.dy, self.w, self.h - self.dy
elif self.anchor == 8:
return self.x, self.y, self.w, self.h + self.dy
else:
return self.x + self.dx, self.y + self.dy, self.w, self.h
def inbox(self, x, y, bx, by, bw=10, bh=10):
x2 = bx + bw
y2 = by + bh
return ( x > bx ) and ( y > by ) and ( x < x2 ) and ( y < y2 )
def hit(self, x, y):
return self.inbox(x, y, self.x, self.y, self.w, self.h)
def collide(self, ax, ay, ax2, ay2):
if ax < ax2:
x = ax
x2 = ax2
else:
x = ax2
x2 = ax
if ay < ay2:
y = ay
y2 = ay2
else:
y = ay2
y2 = ay
ret = ( ( ( self.x <= x ) and ( (self.x+self.w) >= x ) ) or ( ( self.x >= x ) and ( self.x <= x2 ) ) )
ret = ret and ( ( ( self.y <= y ) and ( (self.y+self.h) >= y ) ) or ( ( self.y >= y ) and ( self.y <= y2 ) ) )
return ret
def activate(self):
if self.status < 1:
self.dirty = True
self.status = 1
def deactivate(self):
if self.status == 1:
self.dirty = True
self.status = 0
def select(self):
if self.status != 2:
self.dirty = True
self.status = 2
def deselect(self):
if self.status != 0:
self.dirty = True
self.status = 0
def onpress(self, x, y):
self.moving = True
self.offsetx = x - self.x
self.offsety = y - self.y
        # anchor layout (see get_extents_after_hug):
        #   1 7 3
        #   5 0 6
        #   2 8 4
if ( self.offsetx <= 10 ) and ( self.offsety <= 10 ):
self.anchor = 1
elif ( self.offsetx <= 10 ) and ( self.offsety >= ( self.h - 10 ) ):
self.anchor = 2
elif ( self.offsety <= 10 ) and ( self.offsetx >= ( self.w - 10 ) ):
self.anchor = 3
elif ( self.offsetx >= ( self.w - 10 ) ) and ( self.offsety >= ( self.h - 10 ) ):
self.anchor = 4
elif self.inbox( self.offsetx, self.offsety, 0, (self.h/2)-5 ):
self.anchor = 5
elif self.inbox( self.offsetx, self.offsety, self.w-10, (self.h/2)-5 ):
self.anchor = 6
elif self.inbox( self.offsetx, self.offsety, (self.w/2)-5, 0 ):
self.anchor = 7
elif self.inbox( self.offsetx, self.offsety, (self.w/2)-5, self.h-10 ):
self.anchor = 8
else:
self.anchor = 0
def onrelease(self):
self.moving = False
if self.anchor == 1:
self.x = self.x + self.dx
self.y = self.y + self.dy
self.w = self.w - self.dx
self.h = self.h - self.dy
elif self.anchor == 2:
self.x = self.x + self.dx
self.w = self.w - self.dx
self.h = self.h + self.dy
elif self.anchor == 3:
self.y = self.y + self.dy
self.w = self.w + self.dx
self.h = self.h - self.dy
elif self.anchor == 4:
self.w = self.w + self.dx
self.h = self.h + self.dy
elif self.anchor == 5:
self.x = self.x + self.dx
self.w = self.w - self.dx
elif self.anchor == 6:
self.w = self.w + self.dx
elif self.anchor == 7:
self.y = self.y + self.dy
self.h = self.h - self.dy
elif self.anchor == 8:
self.h = self.h + self.dy
else:
self.x = self.x + self.dx
self.y = self.y + self.dy
self.dx = 0
self.dy = 0
def onmotion(self, x, y):
if self.moving:
oldx = self.x
oldy = self.y
oldw = self.w
oldh = self.h
oldoffx = self.offsetx
oldoffy = self.offsety
            # anchor layout (see get_extents_after_hug):
            #   1 7 3
            #   5 0 6
            #   2 8 4
if self.anchor == 1:
self.w = self.w + self.x
self.x = x - self.offsetx
self.w = self.w - self.x
self.h = self.h + self.y
self.y = y - self.offsety
self.h = self.h - self.y
elif self.anchor == 2:
self.w = self.w + self.x
self.x = x - self.offsetx
self.w = self.w - self.x
self.h = self.h - self.offsety
self.offsety = y - self.y
self.h = self.h + self.offsety
elif self.anchor == 3:
self.h = self.h + self.y
self.y = y - self.offsety
self.h = self.h - self.y
self.w = self.w - self.offsetx
self.offsetx = x - self.x
self.w = self.w + self.offsetx
elif self.anchor == 4:
self.w = self.w - self.offsetx
self.offsetx = x - self.x
self.w = self.w + self.offsetx
self.h = self.h - self.offsety
self.offsety = y - self.y
self.h = self.h + self.offsety
elif self.anchor == 5:
self.w = self.w + self.x
self.x = x - self.offsetx
self.w = self.w - self.x
elif self.anchor == 6:
self.w = self.w - self.offsetx
self.offsetx = x - self.x
self.w = self.w + self.offsetx
elif self.anchor == 7:
self.h = self.h + self.y
self.y = y - self.offsety
self.h = self.h - self.y
elif self.anchor == 8:
self.h = self.h - self.offsety
self.offsety = y - self.y
self.h = self.h + self.offsety
else:
self.x = x - self.offsetx
self.y = y - self.offsety
if self.w < 10:
self.w = 10
if self.x != oldx:
self.x = oldx + oldw - 10
if self.offsetx != oldoffx:
self.offsetx = oldoffx - oldw + 10
if self.h < 10:
self.h = 10
if self.y != oldy:
self.y = oldy + oldh - 10
if self.offsety != oldoffy:
self.offsety = oldoffy - oldh + 10
self.dirty = True
elif self.hit(x, y):
if self.status == 0:
self.dirty = True
self.status = 1
else:
if self.status == 1:
self.dirty = True
self.status = 0
def onleave(self):
if self.status == 1:
self.dirty = True
self.status = 0
def draw(self, crctx):
a = 1
if self.moving:
a = 0.7
# hug
if ( self.dx != 0 ) or ( self.dy != 0 ):
tmp = cairo.ImageSurface(cairo.FORMAT_A8, 16, 16)
cr2 = cairo.Context(tmp)
cr2.set_source_rgba(0, 0, 0, 1)
cr2.set_line_width(8)
cr2.move_to(0, 0)
cr2.line_to(16, 16)
cr2.stroke()
cr2.move_to(12, -4)
cr2.line_to(20, 4)
cr2.stroke()
cr2.move_to(-4, 12)
cr2.line_to(4, 20)
cr2.stroke()
pat = cairo.SurfacePattern(tmp)
pat.set_extend(cairo.EXTEND_REPEAT)
crctx.set_source(pat)
x, y, w, h = self.get_extents_after_hug()
crctx.rectangle(x + 2, y + 2, w - 4, h - 4)
crctx.set_line_width(4)
crctx.stroke()
crctx.set_source_rgba(0.7, 0.7, 0.7, a)
crctx.rectangle(self.x, self.y, self.w, self.h)
crctx.fill()
crctx.set_line_width(2)
if self.status > 0:
if self.status == 1:
crctx.set_source_rgba(0.5, 0.5, 0.5, a)
elif self.status == 2:
crctx.set_source_rgba(0.1, 0, 0.5, a)
crctx.rectangle(self.x, self.y, self.w, self.h)
crctx.stroke()
# corner anchors
crctx.rectangle(self.x, self.y, 10, 10)
crctx.fill()
crctx.rectangle(self.x+self.w-10, self.y, 10, 10)
crctx.fill()
crctx.rectangle(self.x, self.y+self.h-10, 10, 10)
crctx.fill()
crctx.rectangle(self.x+self.w-10, self.y+self.h-10, 10, 10)
crctx.fill()
#edge anchors
crctx.rectangle(self.x, self.y+(self.h/2)-5, 10, 10)
crctx.fill()
crctx.rectangle(self.x+self.w-10, self.y+(self.h/2)-5, 10, 10)
crctx.fill()
crctx.rectangle(self.x+(self.w/2)-5, self.y, 10, 10)
crctx.fill()
crctx.rectangle(self.x+(self.w/2)-5, self.y+self.h-10, 10, 10)
crctx.fill()
else:
crctx.set_source_rgba(0, 0, 0, 1)
crctx.rectangle(self.x, self.y, self.w, self.h)
crctx.stroke()
xbearing, ybearing, width, height, xadvance, yadvance = crctx.text_extents ( self.name )
crctx.move_to( self.x + ( self.w / 2 ) + 0.5 - xbearing - ( width / 2 ), self.y + (self.h / 2 ) + 0.5 - ybearing - ( height / 2 ) )
crctx.set_source_rgba(0, 0, 0, 1)
crctx.show_text(self.name)
self.dirty = False
def hug(self, dx, dy):
self.dx = dx
self.dy = dy
class selection:
def __init__(self):
self.objects = []
self.active = None
def __str__(self):
ret = ""
for sel in self.objects:
ret += sel.__str__()
return ret
def get_extents(self):
x = None
y = None
x2 = None
y2 = None
for sel in self.objects:
if ( not x ) or ( x > sel.x ):
x = sel.x
if ( not y ) or ( y > sel.y ):
y = sel.y
if ( not x2 ) or ( x2 < ( sel.x + sel.w ) ):
x2 = sel.x + sel.w
if ( not y2 ) or ( y2 < ( sel.y + sel.h ) ):
y2 = sel.y + sel.h
return x, y, x2-x, y2-y
def select(self, obj):
try:
self.objects.index(obj)
except ValueError:
self.clear()
self.add(obj)
def add(self, obj):
self.objects.append(obj)
obj.select()
def remove(self, obj):
try:
self.objects.remove(obj)
obj.deselect()
except ValueError:
pass
def toggle(self, obj):
try:
self.objects.index(obj)
self.remove(obj)
except ValueError:
self.add(obj)
def motion(self, x, y):
dirty = False
for sel in self.objects:
sel.onmotion(x, y)
dirty = dirty or sel.dirty
return dirty
def press(self, obj, x, y):
obj.onpress(x, y)
for sel in self.objects:
if sel != obj:
sel.onpress(x, y)
sel.anchor = obj.anchor
def release(self):
for sel in self.objects:
sel.onrelease()
def clear(self):
mysel = copy.copy(self.objects)
for sel in mysel:
self.remove(sel)
def hug(self, dx, dy):
for sel in self.objects:
sel.hug(dx, dy)
def setWidth(self, width):
if self.active:
delta = width - self.active.w
for sel in self.objects:
if sel.w+delta > 0:
sel.w += delta
def setHeight(self, height):
if self.active:
delta = height - self.active.h
for sel in self.objects:
if sel.h+delta > 0:
sel.h += delta
def setXpos(self, xpos):
if self.active:
delta = xpos - self.active.x
for sel in self.objects:
sel.x += delta
def setYpos(self, ypos):
if self.active:
delta = ypos - self.active.y
for sel in self.objects:
sel.y += delta
class scene:
def __init__(self):
self.sel = selection()
self.reset()
def totop(self, obj):
self.top = obj
self.objects.remove(obj)
self.objects.append(obj)
def setgrid(self, dx, dy):
self.gridx = dx
self.gridy = dy
def leave(self, widget, event, data):
for obj in self.objects:
obj.onleave()
widget.queue_draw()
def motion(self, widget, event, data):
dirty = False
if self.top:
dirty = self.sel.motion(event.x, event.y)
# edge hugging
dx = 0
dy = 0
self.sel.hug( 0, 0 )
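# Descriptive note (added): with Alt (MOD1) held, the dragged selection is
# snapped by accumulating a small (dx, dy) "hug" offset: first to the grid,
# then to other objects' edges, and finally to the scene borders.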
if event.state & gtk.gdk.MOD1_MASK:
x, y, w, h = self.sel.get_extents()
# align to grid
if self.gridx != 0:
if x % self.gridx <= 10:
dx = -( x % self.gridx )
if dx == 0 and -x % self.gridx <= 10:
dx = ( -x ) % self.gridx
if dx == 0 and ( ( x+w ) % self.gridx ) <= 10:
dx = - ( ( x+w ) % self.gridx )
if dx == 0 and ( ( -x-w ) % self.gridx ) <= 10:
dx = ( ( -x-w ) % self.gridx )
if self.gridy != 0:
if y % self.gridy <= 10:
dy = -( y % self.gridy )
if dy == 0 and ( -y ) % self.gridy <= 10:
dy = ( -y ) % self.gridy
if dy == 0 and ( ( y+h ) % self.gridy ) <= 10:
dy = - ( ( y+h ) % self.gridy )
if dy == 0 and ( ( -y-h ) % self.gridy ) <= 10:
dy = ( ( -y-h ) % self.gridy )
for obj in self.objects.__reversed__():
if obj.status == 2:
continue
x2, y2, w2, h2 = obj.get_extents()
if ( dy == 0 ) and ( ( ( x2 <= x ) and ( (x2+w2) >= x ) ) or ( ( x2 >= x ) and ( (x+w) >= x2 ) ) ):
if abs(y2+h2-y) <= 10:
dy = y2+h2-y
if (dy == 0) and (abs(y+h-y2) <= 10):
dy = y2-y-h
if ( dx == 0 ) and ( ( ( y2 <= y ) and ( (y2+h2) >= y ) ) or ( ( y2 >= y ) and ( (y+h) >= y2 ) ) ):
if abs(x2+w2-x) <= 10:
dx = x2+w2-x
if (dx == 0) and (abs(x+w-x2) <= 10):
dx = x2-x-w
if ( dx != 0 ) and ( dy != 0 ):
break
if ( dx == 0 ) and ( x <= 10 ):
dx = -x
elif ( dx == 0 ) and ( ( x+w ) >= ( self.w - 10 ) ):
dx = self.w - w - x
if ( dy == 0 ) and ( y <= 10 ):
dy = -y
elif ( dy == 0 ) and ( ( y+h ) >= ( self.h - 10 ) ):
dy = self.h - h - y
self.sel.hug( dx, dy )
self.sel.active = self.top
self.update(self.top)
else:
if self.moving:
self.xsel2 = event.x
self.ysel2 = event.y
dirty = True
for obj in self.objects:
if obj.collide(self.xsel, self.ysel, self.xsel2, self.ysel2):
obj.activate()
else:
obj.deactivate()
else:
active = False
for obj in self.objects.__reversed__():
if active:
obj.deactivate()
else:
obj.onmotion(event.x, event.y)
active = obj.hit(event.x, event.y)
dirty = dirty or obj.dirty
if dirty:
widget.queue_draw()
def press(self, widget, event, data):
hit = False
for obj in self.objects.__reversed__():
if obj.hit(event.x, event.y):
hit = True
if event.state & gtk.gdk.CONTROL_MASK:
self.sel.toggle(obj)
if obj.status == 0:
self.clearSel()
else:
self.totop(obj)
self.sel.select(obj)
if obj.status == 2:
self.sel.press(obj, event.x, event.y)
self.sel.active = obj
self.update(obj)
break
if not hit and event.button == 1:
if not (event.state & gtk.gdk.CONTROL_MASK):
self.sel.clear()
self.clearSel()
self.moving = True
self.xsel = event.x
self.ysel = event.y
self.xsel2 = event.x
self.ysel2 = event.y
if event.button == 3:
self.release(widget, event, data)
if hit:
self.objectMenu(event)
else:
self.sceneMenu(event)
widget.queue_draw()
def release(self, widget, event, data):
if self.moving:
for obj in self.objects:
if obj.status == 1:
self.sel.add(obj)
self.moving = False
else:
self.sel.release()
self.top = None
widget.queue_draw()
def expose(self, widget, event, data):
crctx = widget.window.cairo_create()
# draw grid
crctx.set_source_rgba(0.7, 0.7, 0.7, 1)
crctx.set_line_width(1)
if self.gridx != 0:
for x in range( 0, int(self.w), self.gridx ):
crctx.move_to( x, 0 )
crctx.line_to( x, self.h )
crctx.stroke()
if self.gridy != 0:
for y in range( 0, int(self.h), self.gridy ):
crctx.move_to( 0, y )
crctx.line_to( self.w, y )
crctx.stroke()
for obj in self.objects:
obj.draw(crctx)
if self.moving:
crctx.set_source_rgba(0.2, 0.2, 0.7, 0.1)
crctx.rectangle(self.xsel, self.ysel, self.xsel2 - self.xsel, self.ysel2 - self.ysel)
crctx.fill()
crctx.set_source_rgba(0, 0, 0, 0.3)
crctx.set_line_width(2)
crctx.rectangle(self.xsel, self.ysel, self.xsel2 - self.xsel, self.ysel2 - self.ysel)
crctx.stroke()
def setSize(self, w, h):
self.w = w
self.h = h
def getWidth(self):
return self.w
def getHeight(self):
return self.h
def connect(self, widget):
self.handler1 = widget.connect("leave-notify-event", self.leave, self)
self.handler2 = widget.connect("motion-notify-event", self.motion, self)
self.handler3 = widget.connect("button-press-event", self.press, self)
self.handler4 = widget.connect("button-release-event", self.release, self)
self.handler5 = widget.connect("expose-event", self.expose, self)
def disconnect(self, widget):
widget.disconnect(self.handler1)
widget.disconnect(self.handler2)
widget.disconnect(self.handler3)
widget.disconnect(self.handler4)
widget.disconnect(self.handler5)
def resetSel(self):
self.sel.clear()
self.clearSel()
self.sel = selection()
def reset(self):
self.resetSel()
self.objects = []
self.top = None
self.moving = False
self.xsel = 0
self.ysel = 0
self.xsel2 = 0
self.ysel2 = 0
self.gridx = 0
self.gridy = 0
def add(self, object, addToSel = False):
self.objects.append(object)
if addToSel: self.sel.add(object)
def delete(self):
for obj in self.sel.objects:
self.objects.remove( obj );
self.sel.clear()
self.clearSel()
def setName(self, widget, name):
if self.sel.active:
self.sel.active.name = name
widget.queue_draw()
def setWidth(self, widget, width):
self.sel.setWidth(width)
widget.queue_draw()
def setHeight(self, widget, height):
self.sel.setHeight(height)
widget.queue_draw()
def setXpos(self, widget, xpos):
self.sel.setXpos(xpos)
widget.queue_draw()
def setYpos(self, widget, ypos):
self.sel.setYpos(ypos)
widget.queue_draw()
def clearSel(self):
self.sel.active = None
| Newterm/florence | editor/toolkit.py | Python | gpl-2.0 | 16,795 | 0.050491 |
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Interface to OpenShift oc command
"""
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shlex
import atexit
import shutil
import string
import random
import yaml
import subprocess
# pylint: disable=bare-except
def cleanup_file(inc_file):
""" clean up """
try:
os.unlink(inc_file)
except:
pass
class OCUtil(object):
""" Wrapper for interfacing with OpenShift 'oc' utility """
def __init__(self, namespace='default', config_file='/tmp/admin.kubeconfig', verbose=False, logger=None):
"""
Take initial values for running 'oc'
Ensure to set non-default namespace if that is what is desired
"""
self.namespace = namespace
self.config_file = config_file
self.verbose = verbose
self.copy_kubeconfig()
self.logger = logger
def copy_kubeconfig(self):
""" make a copy of the kubeconfig """
file_name = os.path.join(
'/tmp',
''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(7))
)
shutil.copy(self.config_file, file_name)
atexit.register(cleanup_file, file_name)
self.config_file = file_name
def _run_cmd(self, cmd, base_cmd='oc', ):
""" Actually execute the command """
cmd = " ".join([base_cmd, '--config', self.config_file, '-n', self.namespace, cmd])
if self.logger:
self.logger.debug("ocutil._run_cmd( {} )".format(cmd))
cmd = shlex.split(cmd)
if self.verbose:
print "Running command: {}".format(str(cmd))
try:
return subprocess.check_output(cmd)
except subprocess.CalledProcessError as err:
if self.logger:
self.logger.exception('Error from server: %s' % err.output)
raise err
def _run_cmd_yaml(self, cmd, base_cmd='oc', yaml_cmd='-o yaml'):
""" Actually execute the command and expects yaml """
return yaml.safe_load(self._run_cmd(" ".join([cmd, yaml_cmd]), base_cmd=base_cmd))
def run_user_cmd(self, cmd, base_cmd='oc'):
""" Runs a custom user command """
return self._run_cmd(cmd, base_cmd=base_cmd)
def run_user_cmd_yaml(self, cmd, base_cmd='oc', yaml_cmd='-o yaml'):
"""Runs a custom user command and expects yaml"""
return self._run_cmd_yaml(cmd, base_cmd=base_cmd, yaml_cmd=yaml_cmd)
def get_secrets(self, name):
""" Get secrets from object 'name' """
return self._run_cmd_yaml("get secrets {}".format(name))
def get_endpoint(self, name):
""" Get endpoint details """
return self._run_cmd_yaml("get endpoints {}".format(name))
def get_service(self, name):
""" Get service details """
return self._run_cmd_yaml("get service {}".format(name))
def get_rc(self, name):
""" Get replication controller details """
return self._run_cmd_yaml("get rc {}".format(name))
def get_dc(self, name):
""" Get deployment config details """
return self._run_cmd_yaml("get dc {}".format(name))
def get_route(self, name):
""" Get routes details """
return self._run_cmd_yaml("get route {}".format(name))
def get_pods(self):
""" Get all the pods in the namespace """
return self._run_cmd_yaml("get pods")
def get_projects(self):
""" Get all projects in the cluster """
return self._run_cmd_yaml("get projects")
def get_nodes(self):
""" Get all the nodes in the cluster """
return self._run_cmd_yaml("get nodes")
def get_log(self, name):
""" Gets the log for the specified container """
return self._run_cmd("logs {}".format(name))
| rhdedgar/openshift-tools | openshift_tools/monitoring/ocutil.py | Python | apache-2.0 | 4,378 | 0.001827 |
"""Wrapper for running Freebayes.
"""
import collections
import errno
import fileinput
import glob
import tempfile
import os
import shutil
import subprocess
import vcf
from celery import task
from django.conf import settings
from main.models import Dataset
from main.model_utils import get_dataset_with_type
from pipeline.read_alignment_util import ensure_bwa_index
from pipeline.variant_calling.common import add_vcf_dataset
from pipeline.variant_calling.common import process_vcf_dataset
from pipeline.variant_calling.common import get_common_tool_params
from pipeline.variant_calling.constants import TOOL_FREEBAYES
from pipeline.variant_effects import run_snpeff
from utils import uppercase_underscore
VCF_AF_HEADER = '##FORMAT=<ID=AF,Number=1,Type=Float,Description="Alternate allele observation frequency, AO/(RO+AO)">'
def freebayes_regions(ref_genome,
region_size=settings.FREEBAYES_REGION_SIZE):
"""
Split the reference genome (using its FASTA index) into regions
that will be run through freebayes in parallel.
ref_genome: the reference genome object
region_size: how many bases each parallelized region 'chunk' will be
"""
ref_genome_fasta = get_dataset_with_type(ref_genome,
Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
# ensure that the fasta file has an index
ensure_bwa_index(ref_genome_fasta)
ref_genome_faidx = ref_genome_fasta + '.fai'
regions = []
with open(ref_genome_faidx) as faidx_fh:
for line in faidx_fh:
fields = line.strip().split('\t')
chr_name, chr_len = fields[:2]
chr_len = int(chr_len)
end = 0
while end < chr_len:
start = end
end = start + region_size
if end > chr_len:
end = chr_len
regions.append('{chr_name}:{start}-{end}'.format(
chr_name=chr_name,
start=start,
end=end))
start = end
return regions
def run_freebayes(fasta_ref, sample_alignments, vcf_output_dir,
vcf_output_filename, alignment_type, region=None, **kwargs):
"""Run freebayes using the bam alignment files keyed by the alignment_type
for all Genomes of the passed in ReferenceGenome.
NOTE: If a Genome doesn't have a bam alignment file with this
alignment_type, then it won't be used.
Returns:
Boolean, True if successfully made it to the end, else False.
"""
print 'RUNNING FREEBAYES...'
bam_files = [
get_dataset_with_type(sa, alignment_type).get_absolute_location()
for sa in sample_alignments]
# Build up the bam part of the freebayes binary call.
bam_part = []
for bam_file in bam_files:
bam_part.append('--bam')
bam_part.append(bam_file)
# Determine alignment ploidy (haploid or diploid).
alignment_group = sample_alignments[0].alignment_group
if alignment_group.alignment_options['call_as_haploid']:
alignment_ploidy = 1
else:
alignment_ploidy = 2
other_args_part = [
'--fasta-reference', fasta_ref,
'--pvar', '0.001',
'--ploidy', str(alignment_ploidy),
'--min-alternate-fraction', '.3',
'--no-population-priors',
# '--binomial-obs-priors-off',
'--use-mapping-quality',
'--min-base-quality', '25',
'--min-mapping-quality', '30'
]
if region:
other_args_part.extend(['--region', region])
# Build the full command and execute it for all bam files at once.
full_command = (
['%s/freebayes/freebayes' % settings.TOOLS_DIR] +
bam_part +
other_args_part)
print ' '.join(full_command)
# Run Freebayes.
with open(vcf_output_filename + '.error', 'w') as error_output_fh:
with open(vcf_output_filename, 'w') as fh:
subprocess.check_call(
full_command, stdout=fh, stderr=error_output_fh)
# add the allele frequency FORMAT field to the vcf.
process_freebayes_region_vcf(vcf_output_filename)
return True # success
def process_freebayes_region_vcf(vcf_output_filename):
"""
Processes the vcf before region merging.
If AO and RO are available for an allele, also add the alternate allele
frequency (AF), since the fraction of total depth is a good way to filter
het/hom calls.
"""
# store the modified VCF in this temporary file, then move it to overwrite
# the original file when done adding this field.
temp_fh = tempfile.NamedTemporaryFile(delete=False)
with open(vcf_output_filename, 'r') as vcf_input_fh:
vcf_reader = vcf.Reader(vcf_input_fh)
# Generate extra header row for AF = AO/(RO+AO).
vcf_reader._header_lines.append(VCF_AF_HEADER)
key, val = vcf.parser._vcf_metadata_parser().read_format(VCF_AF_HEADER)
vcf_reader.formats[key] = val
# A list of all the FORMAT genotype keys, in order
format_keys = vcf_reader.formats.keys()
vcf_writer = vcf.Writer(temp_fh, vcf_reader)
# Write the old records with the new AF FORMAT field
for record in vcf_reader:
# This simply appends ':AF' to the record format field
record.add_format('AF')
# check if there are multiple alternate alleles
multi_alts = len(record.ALT) > 1
for sample in record.samples:
# Get alt allele frequencies for each alternate allele.
try:
# TODO: Right now, summing multiple alternate alleles because
# we turn arrays into strings in the UI.
if multi_alts:
total_obs = float(sum(sample['AO']) + sample['RO'])
if total_obs > 0:
af = sum([float(ao) / total_obs for ao in sample['AO']])
# if a single alternate allele:
else:
total_obs = float(sample['AO'] + sample['RO'])
if total_obs > 0:
af = float(sample['AO']) / total_obs
except:
af = 0.0
# new namedtuple with the additional format field
CallData = collections.namedtuple(
'CallData',
sample.data._fields+('AF',))
sample.data = CallData(*sample.data, AF=af)
vcf_writer.write_record(record)
# close the writer and move the temp file over the original to replace it
vcf_writer.close()
shutil.move(temp_fh.name, vcf_output_filename)
print 'moved from {} to {}'.format(temp_fh.name, vcf_output_filename)
def merge_freebayes_parallel(alignment_group):
"""
Merge, sort, and make unique all regional freebayes variant calls after
parallel execution.
Returns the Dataset pointing to the merged vcf file. If no freebayes files,
returns None.
"""
# First, grab all freebayes parallel vcf files.
common_params = get_common_tool_params(alignment_group)
partial_freebayes_vcf_output_dir = os.path.join(
common_params['output_dir'], 'freebayes')
# Glob all the parial (region-specific) vcf files.
# Assert that there is at least one.
vcf_output_filename_prefix = os.path.join(partial_freebayes_vcf_output_dir,
uppercase_underscore(common_params['alignment_type']) +
'.partial.*.vcf')
vcf_files = glob.glob(vcf_output_filename_prefix)
if not len(vcf_files):
return None
# Generate output filename.
vcf_ouput_filename_merged = os.path.join(partial_freebayes_vcf_output_dir,
uppercase_underscore(common_params['alignment_type']) + '.vcf')
vcf_ouput_filename_merged_fh = open(vcf_ouput_filename_merged, 'w')
streamsort_cmd = ' '.join([
settings.VCFSTREAMSORT_BINARY,
'-w 1000 | ',
settings.VCFUNIQ_BINARY])
# create a pipe to write to that will sort all the sub-vcfs
stream_merge_proc = subprocess.Popen(streamsort_cmd,
stdin=subprocess.PIPE,
stdout=vcf_ouput_filename_merged_fh,
shell=True)
# concatenate all the vcf files w/ fileinput and keep all but the first
# header and write to the stream_merge_proc stdin pipe
header=True
for line in fileinput.input(vcf_files):
#https://gist.github.com/waylan/2353749
try:
if line.startswith('##'):
if header:
stream_merge_proc.stdin.write(line)
continue
elif line.startswith('#'):
if header:
stream_merge_proc.stdin.write(line)
header=False
continue
stream_merge_proc.stdin.write(line)
except IOError as e:
if e.errno == errno.EPIPE or e.errno == errno.EINVAL:
# Stop loop on "Invalid pipe" or "Invalid argument".
# No sense in continuing with broken pipe.
break
else:
# Raise any other error.
raise
# close the stdin to the stream merge, wait for it to finish,
# and close the file.
stream_merge_proc.stdin.close()
stream_merge_proc.wait()
vcf_ouput_filename_merged_fh.close()
vcf_dataset_type = Dataset.TYPE.VCF_FREEBAYES
# add unannotated vcf dataset first
vcf_dataset = add_vcf_dataset(alignment_group, vcf_dataset_type,
vcf_ouput_filename_merged)
# If genome is annotated then run snpeff now,
# then update the vcf_output_filename and vcf_dataset_type.
if alignment_group.reference_genome.is_annotated():
vcf_ouput_filename_merged_snpeff = run_snpeff(
alignment_group, TOOL_FREEBAYES)
vcf_dataset_type = Dataset.TYPE.VCF_FREEBAYES_SNPEFF
vcf_dataset = add_vcf_dataset(
alignment_group, vcf_dataset_type,
vcf_ouput_filename_merged_snpeff)
# generate variants, process, etc
process_vcf_dataset(alignment_group, vcf_dataset_type)
#remove the partial vcfs
for filename in vcf_files:
os.remove(filename)
return vcf_dataset
| woodymit/millstone | genome_designer/pipeline/variant_calling/freebayes.py | Python | mit | 10,343 | 0.002127 |
# This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
from lib.transition import Transition
from lib.buffer_utils import BufferUtils
class RadialWipe(Transition):
"""
Implements a radial wipe (Iris) transition
"""
def __init__(self, app):
Transition.__init__(self, app)
def __str__(self):
return "Radial Wipe"
def reset(self):
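# Precompute each pixel's squared distance from the scene centre, normalised
# to [0, 1]; get() compares these distances against the transition progress.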
locations = self._app.scene.get_all_pixel_locations()
locations -= self._app.scene.center_point()
#locations -= locations[np.random.randint(0, len(locations) - 1)]
locations = np.square(locations)
self.distances = locations.T[0] + locations.T[1]
self.distances /= max(self.distances)
def get(self, start, end, progress):
buffer = np.where(self.distances < progress, end.T, start.T)
buffer[1][np.abs(self.distances - progress) < 0.02] += 0.5 # we can apply effects to transition line here
return buffer.T
| nyarasha/firemix | plugins/radial_wipe.py | Python | gpl-3.0 | 1,645 | 0.001824 |
# -*- coding: utf-8 -*-
'''
Copyright 2011 NTT Software Corporation.
All Rights Reserved.
@author NTT Software Corporation.
@version 1.0.0
$Date: 2010-08-31 09:54:14 +0900 (火, 31 8 2010) $
$Revision: 435 $
$Author: NTT Software Corporation. $
'''
import os
import sys
from optparse import OptionParser
from nw import NwHandler
import csv
import logging.config
# Keys
KEY_ENV_VHUT_HOME = 'VHUT_HOME'
# Configuration file
PATH_CONFIG = os.environ[KEY_ENV_VHUT_HOME]+"/agent/conf/vhuta.conf"
# Network configuration DB file
PATH_DATA_NW = os.environ[KEY_ENV_VHUT_HOME]+"/agent/data/nw.db"
def main():
usage = "%prog --action ACTION [parameter options]"
psr = OptionParser(usage=usage)
psr.add_option('--role', action='store', type='string', default=False, dest='role', help="server's role (manager/node)")
psr.add_option('--public_if', action='store', type='string', default=False, dest='public_if', help="server's public network interface")
psr.add_option('--private_if', action='store', type='string', default=False, dest='private_if', help="server's private network interface")
psr.add_option('--private_network', action='store', type='string', default=False, dest='private_network', help="server's own network address")
psr.add_option('--private_netmask', action='store', type='string', default=False, dest='private_netmask', help="server's own network netmask")
psr.add_option('--log', action='store', type='string', default=False, dest='log', help='logfile path')
psr.add_option('--loglevel', action='store', type='string', default=False, dest='loglevel', help='loglevel (DEBUG/INFO/WARING/ERROR/CRITICAL)')
# psr.add_option('--action', action='store', type='string', dest='action', help='network management action (wipe/revive/show/add_network/del_network/add_ip/del_ip/add_nat/del_nat/set_filter/add_instance_bridge/del_instance_bridge/init_network)')
psr.add_option('--action', action='store', type='string', dest='action', help='network management action (init/clear/show/add_network/del_network/add_ip/del_ip/add_nat/del_nat/set_filter/import)')
psr.add_option('--vlan' , action='store', type='int', dest='vlan', help='VLAN ID')
psr.add_option('--network', action='store', type='string', dest='network', help='network address for VLAN')
psr.add_option('--netmask', action='store', type='string', dest='netmask', help='netmask for VLAN')
psr.add_option('--gateway', action='store', type='string', dest='gateway', help='gateway address for VLAN')
psr.add_option('--broadcast', action='store', type='string', dest='broadcast', help='broadcat address for VLAN')
psr.add_option('--nameserver', action='store', type='string', dest='nameserver', help='nameserver address for VLAN')
psr.add_option('--dhcp', action='store', type='string', dest='dhcp', help='dhcp address for VLAN')
psr.add_option('--username', action='store', type='string', dest='username', help='user name of VLAN')
psr.add_option('--ip' , action='store', type='string', dest='ip', help="instance's IP address")
psr.add_option('--mac', action='store', type='string', dest='mac', help="instance's MAC address")
psr.add_option('--publicip', action='store', type='string', dest='publicip', help='public IP address binding by NAT')
psr.add_option('--privateip', action='store', type='string', dest='privateip', help='private IP address binding by NAT')
psr.add_option('--bridge', action='store', type='string', dest='bridge', help='instance bridge prefix name')
psr.add_option('--filtertype', action='store', type='string', dest='filtertype', help='netfilter filter action type (open/close)')
psr.add_option('--destname', action='store', type='string', dest='destname', help='netfilter filter destination user name')
psr.add_option('--sourcename', action='store', type='string',default=False, dest='sourcename', help='netfilter filter source user name')
psr.add_option('--sourcenet', action='store', type='string', default=False, dest='sourcenet', help='netfilter filter source network')
psr.add_option('--protocol', action='store', type='string', default=False, dest='protocol', help='netfilter filter protocol name')
psr.add_option('--minport', action='store', type='string', default=False, dest='minport', help='netfilter filter port range min')
psr.add_option('--maxport', action='store', type='string', default=False, dest='maxport', help='netfilter filter port range max')
psr.add_option('--csv', action='store', type='string', default=False, dest='csv', help='import csv file path')
psr.add_option('--nodump', action="store_true", dest="nodump", default=False, help='do not write db flag')
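# Illustrative invocation (all values below are placeholders):
#   python vhutac.py --action add_network --vlan 100 --network 192.168.100.0 \
#     --netmask 255.255.255.0 --broadcast 192.168.100.255 --gateway 192.168.100.1 \
#     --nameserver 192.168.100.1 --dhcp 192.168.100.2 --username user01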
(opts, args) = psr.parse_args(sys.argv)
nwa = NwHandler(PATH_CONFIG, PATH_DATA_NW)
if opts.action:
if opts.action == 'import':
if opts.csv:
reader = csv.DictReader(file(opts.csv, "rb"))
for network in reader:
if nwa.add_network(network["vlan"], network["address"], network["mask"], network["broadcast"], network["gateway"], network["dns"], network["dhcp"], network["name"], get_nodump(opts)):
print "%s is added." % network["name"]
else:
print "%s is faild!" % network["name"]
exit(1)
print "init network: done."
else:
print "We need those options: --csv."
elif opts.action == 'init':
if nwa.init(False, get_nodump(opts)):
print "init: done."
else:
print "init: failed!"
elif opts.action == 'clear':
if nwa.init(True, get_nodump(opts)):
print "clear: done."
else:
print "clear: failed!"
# elif opts.action == 'revive':
# if nwa.revive():
# print "revive: done."
# else:
# print "revive: failed!"
# exit(1)
elif opts.action == 'show':
config_print(nwa.get_config())
elif opts.action == 'add_network':
if opts.vlan and opts.network and opts.netmask and opts.broadcast and opts.gateway and opts.nameserver and opts.dhcp and opts.username:
if nwa.add_network(opts.vlan, opts.network, opts.netmask, opts.broadcast, opts.gateway, opts.nameserver, opts.dhcp, opts.username, get_nodump(opts)):
print "add network: done."
else:
print "add network: failed!"
else:
print "We need those options: --vlan, --network, --netmask,--broadcast, --gateway, --nameserver, --dhcp, --username."
exit(1)
elif opts.action == 'del_network':
if opts.vlan:
if nwa.del_network(opts.vlan, get_nodump(opts)):
print "del network: done."
else:
print "del network: failed!"
else:
print "We need those options: --vlan."
exit(1)
elif opts.action == 'add_ip':
if opts.ip and opts.mac:
if nwa.add_ip(opts.ip, opts.mac, get_nodump(opts)):
print "add ip: done."
else:
print "add ip: failed!"
else:
print "We need those options: --ip, --mac."
exit(1)
elif opts.action == 'del_ip':
if opts.ip and opts.mac:
if nwa.del_ip(opts.ip, opts.mac, get_nodump(opts)):
print "del ip: done."
else:
print "del ip: failed!"
else:
print "We need those options: --ip, --mac."
elif opts.action == 'add_nat':
if opts.publicip and opts.privateip:
if nwa.add_nat(opts.privateip, opts.publicip, get_nodump(opts)):
print "add nat: done."
else:
print "add nat: failed!"
else:
print "We need those options: --publicip, --privateip."
elif opts.action == 'del_nat':
if opts.publicip and opts.privateip:
if nwa.del_nat(opts.privateip, opts.publicip, get_nodump(opts)):
print "del nat: done."
else:
print "del nat: failed!"
exit(1)
else:
print "We need those options: --publicip, --privateip."
elif opts.action == 'set_filter':
if opts.filtertype and opts.destname and (opts.sourcename or opts.sourcenet):
if nwa.set_filter(opts.filtertype, opts.destname, other_username=opts.sourcename,
other_net=opts.sourcenet, protocol=opts.protocol, minport=opts.minport, maxport=opts.maxport, nodump=get_nodump(opts)):
print "set filter: done."
else:
print "set filter: failed!"
else:
print "We need those options: --filtertype, --destname, --sourcename or --sourcenet."
# elif opts.action == 'add_instance_bridge':
# if opts.vlan and opts.bridge:
# if dvn.add_instance_bridge(opts.vlan, opts.bridge):
# print "add instance bridge: done."
# else:
# print "add instance bridge: failed!"
# exit(1)
# else:
# print "We need those options: --vlan, --bridge."
# exit(1)
# elif opts.action == 'del_instance_bridge':
# if opts.vlan and opts.bridge:
# if dvn.del_instance_bridge(opts.vlan, opts.bridge):
# print "add instance bridge: done."
# else:
# print "add instance bridge: failed!"
# exit(1)
# else:
# print "We need those options: --vlan, --bridge."
# exit(1)
# else:
# psr.print_help()
else:
print "We need at least this option: --action."
print "\n"
psr.print_help()
def config_print(conf):
print '================= Configuration ================='
# print "role: %s" % conf.role
print "public IF: %s" % conf.public_if
print "private IF: %s" % conf.private_if
print "server's network: %s/%s" % (conf.private_network, conf.private_netmask)
for i in conf.networks.keys():
print '-------------'
print "VLAN ID: %d" % i
print "network: %s" % conf.networks[i]['network']
print "netmask: %s" % conf.networks[i]['netmask']
print "broadcast: %s" % conf.networks[i]['broadcast']
print "gateway: %s" % conf.networks[i]['gateway']
print "nameserver: %s" % conf.networks[i]['nameserver']
print "username: %s" % conf.networks[i]['username']
print '-------------'
print "publicips: %s" % conf.publicips
print "addresses: %s" % conf.addresses
print 'netfilter: start->'
print "%s" % conf.netfilter
print 'netfilter: <-end'
print '================================================='
return True
def get_nodump(opts):
if opts.nodump:
return 1
else:
return
if (__name__ == "__main__"):
logging.config.fileConfig(os.environ[KEY_ENV_VHUT_HOME]+"/agent/conf/log.conf")
if os.getuid() == 0:
main()
# exit()
else:
print "You must be root."
exit(1)
# =====================================================================
#
# Copyright 2011 NTT Sofware Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =====================================================================
| kuenishi/vHut | src/admin_server/vhut/agent/vhut/vhutac.py | Python | gpl-2.0 | 12,562 | 0.006064 |
# -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numbers
import warnings
import numpy as np
import traits.api as t
from scipy import constants
from hyperspy._signals.spectrum import Spectrum
from hyperspy.misc.elements import elements as elements_db
import hyperspy.axes
from hyperspy.decorators import only_interactive
from hyperspy.gui.eels import TEMParametersUI
from hyperspy.defaults_parser import preferences
import hyperspy.gui.messages as messagesui
from hyperspy.misc.progressbar import progressbar
from hyperspy.components import PowerLaw
from hyperspy.misc.utils import isiterable, closest_power_of_two, underline
from hyperspy.misc.utils import without_nans
class EELSSpectrum(Spectrum):
_signal_type = "EELS"
def __init__(self, *args, **kwards):
Spectrum.__init__(self, *args, **kwards)
# Attributes defaults
self.subshells = set()
self.elements = set()
self.edges = list()
if hasattr(self.metadata, 'Sample') and \
hasattr(self.metadata.Sample, 'elements'):
print('Elemental composition read from file')
self.add_elements(self.metadata.Sample.elements)
self.metadata.Signal.binned = True
def add_elements(self, elements, include_pre_edges=False):
"""Declare the elemental composition of the sample.
The ionisation edges of the elements present in the current
energy range will be added automatically.
Parameters
----------
elements : tuple of strings
The symbol of the elements. Note this input must always be
in the form of a tuple. Meaning: add_elements(('C',)) will
work, while add_elements(('C')) will NOT work.
include_pre_edges : bool
If True, the ionization edges with an onset below the lower
energy limit of the SI will be included
Examples
--------
>>> s = signals.EELSSpectrum(np.arange(1024))
>>> s.add_elements(('C', 'O'))
Adding C_K subshell
Adding O_K subshell
Raises
------
ValueError
"""
if not isiterable(elements) or isinstance(elements, basestring):
raise ValueError(
"Input must be in the form of a tuple. For example, "
"if `s` is the variable containing this EELS spectrum:\n "
">>> s.add_elements(('C',))\n"
"See the docstring for more information.")
for element in elements:
if element in elements_db:
self.elements.add(element)
else:
raise ValueError(
"%s is not a valid symbol of a chemical element"
% element)
if not hasattr(self.metadata, 'Sample'):
self.metadata.add_node('Sample')
self.metadata.Sample.elements = list(self.elements)
if self.elements:
self.generate_subshells(include_pre_edges)
def generate_subshells(self, include_pre_edges=False):
"""Calculate the subshells for the current energy range for the
elements present in self.elements
Parameters
----------
include_pre_edges : bool
If True, the ionization edges with an onset below the lower
energy limit of the SI will be included
"""
Eaxis = self.axes_manager.signal_axes[0].axis
if not include_pre_edges:
start_energy = Eaxis[0]
else:
start_energy = 0.
end_energy = Eaxis[-1]
for element in self.elements:
e_shells = list()
for shell in elements_db[element]['Atomic_properties']['Binding_energies']:
if shell[-1] != 'a':
if start_energy <= \
elements_db[element]['Atomic_properties']['Binding_energies'][shell][
'onset_energy (eV)'] \
<= end_energy:
subshell = '%s_%s' % (element, shell)
if subshell not in self.subshells:
print "Adding %s subshell" % (subshell)
self.subshells.add(
'%s_%s' % (element, shell))
e_shells.append(subshell)
def estimate_zero_loss_peak_centre(self, mask=None):
"""Estimate the posision of the zero-loss peak.
This function provides just a coarse estimation of the position
of the zero-loss peak centre by computing the position of the maximum
of the spectra. For subpixel accuracy use `estimate_shift1D`.
Parameters
----------
mask : Signal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
Returns
-------
zlpc : Signal subclass
The estimated position of the maximum of the ZLP peak.
Notes
-----
This function only works when the zero-loss peak is the most
intense feature in the spectrum. If it is not, in most cases
the spectrum can be cropped to meet this criterion.
Alternatively use `estimate_shift1D`.
See Also
--------
estimate_shift1D, align_zero_loss_peak
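Examples
--------
Illustrative only; a real low-loss spectrum should be used instead of
this synthetic ramp, whose maximum sits at the last channel:
>>> s = signals.EELSSpectrum(np.arange(1024))
>>> zlpc = s.estimate_zero_loss_peak_centre()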
"""
self._check_signal_dimension_equals_one()
self._check_navigation_mask(mask)
zlpc = self.valuemax(-1)
if self.axes_manager.navigation_dimension == 1:
zlpc = zlpc.as_spectrum(0)
elif self.axes_manager.navigation_dimension > 1:
zlpc = zlpc.as_image((0, 1))
if mask is not None:
zlpc.data[mask.data] = np.nan
return zlpc
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
**kwargs):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
is automatically corrected if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
If True, print summary statistics of the ZLP maximum before
the alignment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments are passed to `align1D`. For
more information read its docstring.
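Examples
--------
Illustrative only; `s` is assumed to be a low-loss spectrum (or spectrum
image) whose most intense feature is the ZLP:
>>> s.align_zero_loss_peak(calibrate=True, subpixel=True)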
"""
def substract_from_offset(value, signals):
for signal in signals:
signal.axes_manager[-1].offset -= value
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
mean_ = without_nans(zlpc.data).mean()
if print_stats is True:
print
print(underline("Initial ZLP position statistics"))
zlpc.print_summary_statistics()
for signal in also_align + [self]:
signal.shift1D(-zlpc.data + mean_)
if calibrate is True:
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
substract_from_offset(without_nans(zlpc.data).mean(),
also_align + [self])
if subpixel is False:
return
left, right = -3., 3.
if calibrate is False:
mean_ = without_nans(self.estimate_zero_loss_peak_centre(
mask=mask).data).mean()
left += mean_
right += mean_
left = (left if left > self.axes_manager[-1].axis[0]
else self.axes_manager[-1].axis[0])
right = (right if right < self.axes_manager[-1].axis[-1]
else self.axes_manager[-1].axis[-1])
self.align1D(left, right, also_align=also_align, **kwargs)
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
if calibrate is True:
substract_from_offset(without_nans(zlpc.data).mean(),
also_align + [self])
def estimate_elastic_scattering_intensity(self,
threshold):
"""Rough estimation of the elastic scattering intensity by
truncation of a EELS low-loss spectrum.
Parameters
----------
threshold : {Signal, float, int}
Truncation energy to estimate the intensity of the
elastic scattering. The
threshold can be provided as a signal of the same dimension
as the input spectrum navigation space containing the
threshold value in the energy units. Alternatively a constant
threshold can be specified in energy/index units by passing
float/int.
Returns
-------
I0: Signal
The elastic scattering intensity.
See Also
--------
estimate_elastic_scattering_threshold
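Examples
--------
Illustrative only; `ll` is assumed to be a low-loss spectrum with the ZLP
at zero, and the 3 eV truncation energy is an arbitrary choice:
>>> I0 = ll.estimate_elastic_scattering_intensity(threshold=3.)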
"""
# TODO: Write unit tests
self._check_signal_dimension_equals_one()
if isinstance(threshold, numbers.Number):
I0 = self.isig[:threshold].integrate1D(-1)
I0.axes_manager.set_signal_dimension(
min(2, self.axes_manager.navigation_dimension))
else:
bk_threshold_navigate = (
threshold.axes_manager._get_axis_attribute_values('navigate'))
threshold.axes_manager.set_signal_dimension(0)
I0 = self._get_navigation_signal()
bk_I0_navigate = (
I0.axes_manager._get_axis_attribute_values('navigate'))
I0.axes_manager.set_signal_dimension(0)
pbar = hyperspy.misc.progressbar.progressbar(
maxval=self.axes_manager.navigation_size)
for i, s in enumerate(self):
threshold_ = threshold[self.axes_manager.indices].data[0]
if np.isnan(threshold_):
I0[self.axes_manager.indices] = np.nan
else:
I0[self.axes_manager.indices].data[:] = (
s[:threshold_].integrate1D(-1).data)
pbar.update(i)
pbar.finish()
threshold.axes_manager._set_axis_attribute_values(
'navigate',
bk_threshold_navigate)
I0.axes_manager._set_axis_attribute_values(
'navigate',
bk_I0_navigate)
I0.metadata.General.title = (
self.metadata.General.title + ' elastic intensity')
if self.tmp_parameters.has_item('filename'):
I0.tmp_parameters.filename = (
self.tmp_parameters.filename +
'_elastic_intensity')
I0.tmp_parameters.folder = self.tmp_parameters.folder
I0.tmp_parameters.extension = \
self.tmp_parameters.extension
return I0
def estimate_elastic_scattering_threshold(self,
window=10.,
tol=None,
number_of_points=5,
polynomial_order=3,
start=1.):
"""Calculate the first inflexion point of the spectrum derivative
within a window.
This method assumes that the zero-loss peak is located at position zero
in all the spectra. Currently it looks for an inflexion point, that can
be a local maximum or minimum. Therefore, to estimate the elastic
scattering threshold `start` + `window` must be less than the first
maximum for all spectra (often the bulk plasmon maximum). If there is
more than one inflexion point within the energy window it selects the
smoothest one, which is often, but not always, a good choice in this
case.
Parameters
----------
window : {None, float}
If None, the search for the local inflexion point is performed
using the full energy range. A positive float will restrict
the search to the (0,window] energy window, where window is given
in the axis units. If no inflexion point is found in this
spectral range the window value is returned instead.
tol : {None, float}
The threshold tolerance for the derivative. If "auto" it is
automatically calculated as the minimum value that guarantees
finding an inflexion point in all the spectra in given energy
range.
number_of_points : int
If non zero performs order three Savitzky-Golay smoothing
to the data to avoid falling in local minima caused by
the noise.
polynomial_order : int
Savitzky-Golay filter polynomial order.
start : float
Position from the zero-loss peak centre from where to start
looking for the inflexion point.
Returns
-------
threshold : Signal
A Signal of the same dimension as the input spectrum
navigation space containing the estimated threshold. Where the
threshold couldn't be estimated the value is set to nan.
See Also
--------
estimate_elastic_scattering_intensity,align_zero_loss_peak,
find_peaks1D_ohaver, fourier_ratio_deconvolution.
Notes
-----
The main purpose of this method is to be used as input for
`estimate_elastic_scattering_intensity`. Indeed, for currently
achievable energy resolutions, there is no such thing as an elastic
scattering threshold. Therefore, please be aware of the limitations of
this method when using it.
"""
self._check_signal_dimension_equals_one()
# Create threshold with the same shape as the navigation dims.
threshold = self._get_navigation_signal()
threshold.axes_manager.set_signal_dimension(0)
# Progress Bar
axis = self.axes_manager.signal_axes[0]
min_index, max_index = axis.value_range_to_indices(start,
start + window)
if max_index < min_index + 10:
raise ValueError("Please select a bigger window")
s = self.isig[min_index:max_index].deepcopy()
if number_of_points:
s.smooth_savitzky_golay(polynomial_order=polynomial_order,
number_of_points=number_of_points,
differential_order=1)
else:
s = s.diff(-1)
if tol is None:
tol = np.max(np.abs(s.data).min(axis.index_in_array))
saxis = s.axes_manager[-1]
inflexion = (np.abs(s.data) <= tol).argmax(saxis.index_in_array)
threshold.data[:] = saxis.index2value(inflexion)
if isinstance(inflexion, np.ndarray):
threshold.data[inflexion == 0] = np.nan
else: # Single spectrum
if inflexion == 0:
threshold.data[:] = np.nan
del s
if np.isnan(threshold.data).any():
warnings.warn("No inflexion point could we found in some positions "
"that have been marked with nans.")
# Create spectrum image, stop and return value
threshold.metadata.General.title = (
self.metadata.General.title +
' ZLP threshold')
if self.tmp_parameters.has_item('filename'):
threshold.tmp_parameters.filename = (
self.tmp_parameters.filename +
'_ZLP_threshold')
threshold.tmp_parameters.folder = self.tmp_parameters.folder
threshold.tmp_parameters.extension = \
self.tmp_parameters.extension
threshold.axes_manager.set_signal_dimension(
min(2, self.axes_manager.navigation_dimension))
return threshold
def estimate_thickness(self,
threshold,
zlp=None,):
"""Estimates the thickness (relative to the mean free path)
of a sample using the log-ratio method.
The current EELS spectrum must be a low-loss spectrum containing
the zero-loss peak. The hyperspectrum must be well calibrated
and aligned.
Parameters
----------
threshold : {Signal, float, int}
Truncation energy to estimate the intensity of the
elastic scattering. The
threshold can be provided as a signal of the same dimension
as the input spectrum navigation space containing the
threshold value in the energy units. Alternatively a constant
threshold can be specified in energy/index units by passing
float/int.
zlp : {None, EELSSpectrum}
If not None the zero-loss
peak intensity is calculated from the ZLP spectrum
supplied by integration using Simpson's rule. If None estimates
the zero-loss peak intensity using
`estimate_elastic_scattering_intensity` by truncation.
Returns
-------
s : Signal
The thickness relative to the MFP. It returns a Spectrum,
Image or a Signal, depending on the current spectrum navigation
dimensions.
Notes
-----
For details see: Egerton, R. Electron Energy-Loss
Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
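Examples
--------
Illustrative only; `ll` is assumed to be a calibrated and aligned low-loss
spectrum, and the 3 eV threshold is an arbitrary choice:
>>> t_over_lambda = ll.estimate_thickness(threshold=3.)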
"""
# TODO: Write unit tests
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
total_intensity = self.integrate1D(axis.index_in_array).data
if zlp is not None:
I0 = zlp.integrate1D(axis.index_in_array).data
else:
I0 = self.estimate_elastic_scattering_intensity(
threshold=threshold,).data
t_over_lambda = np.log(total_intensity / I0)
s = self._get_navigation_signal()
s.data = t_over_lambda
s.metadata.General.title = (self.metadata.General.title +
' $\\frac{t}{\\lambda}$')
if self.tmp_parameters.has_item('filename'):
s.tmp_parameters.filename = (
self.tmp_parameters.filename +
'_relative_thickness')
s.tmp_parameters.folder = self.tmp_parameters.folder
s.tmp_parameters.extension = \
self.tmp_parameters.extension
return s
def fourier_log_deconvolution(self,
zlp,
add_zlp=False,
crop=False):
"""Performs fourier-log deconvolution.
Parameters
----------
zlp : EELSSpectrum
The corresponding zero-loss peak.
add_zlp : bool
If True, adds the ZLP to the deconvolved spectrum
crop : bool
If True crop the spectrum to leave out the channels that
have been modified to decay smoothly to zero at the sides
of the spectrum.
Returns
-------
An EELSSpectrum containing the current data deconvolved.
Notes
-----
For details see: Egerton, R. Electron Energy-Loss
Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
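Examples
--------
Illustrative only; `s` is assumed to be a low-loss spectrum and `zlp` an
EELSSpectrum of the same dimensions containing only the zero-loss peak:
>>> ssd = s.fourier_log_deconvolution(zlp, add_zlp=False)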
"""
self._check_signal_dimension_equals_one()
s = self.deepcopy()
zlp_size = zlp.axes_manager.signal_axes[0].size
self_size = self.axes_manager.signal_axes[0].size
tapped_channels = s.hanning_taper()
# Conservative new size to solve the wrap-around problem
size = zlp_size + self_size - 1
# Increase to the closest power of two to enhance the FFT
# performance
size = closest_power_of_two(size)
axis = self.axes_manager.signal_axes[0]
z = np.fft.rfft(zlp.data, n=size, axis=axis.index_in_array)
j = np.fft.rfft(s.data, n=size, axis=axis.index_in_array)
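# Fourier-log formula (Egerton): the single-scattering distribution is
# recovered from J1(v) = Z(v) * ln(J(v) / Z(v)); nan_to_num guards against
# logs and divisions over zero-intensity channels.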
j1 = z * np.nan_to_num(np.log(j / z))
sdata = np.fft.irfft(j1, axis=axis.index_in_array)
s.data = sdata[s.axes_manager._get_data_slice(
[(axis.index_in_array, slice(None, self_size)), ])]
if add_zlp is True:
if self_size >= zlp_size:
s.data[s.axes_manager._get_data_slice(
[(axis.index_in_array, slice(None, zlp_size)), ])
] += zlp.data
else:
s.data += zlp.data[s.axes_manager._get_data_slice(
[(axis.index_in_array, slice(None, self_size)), ])]
s.metadata.General.title = (s.metadata.General.title +
' after Fourier-log deconvolution')
if s.tmp_parameters.has_item('filename'):
s.tmp_parameters.filename = (
self.tmp_parameters.filename +
'_after_fourier_log_deconvolution')
if crop is True:
s.crop(axis.index_in_axes_manager,
None, int(-tapped_channels))
return s
def fourier_ratio_deconvolution(self, ll,
fwhm=None,
threshold=None,
extrapolate_lowloss=True,
extrapolate_coreloss=True):
"""Performs Fourier-ratio deconvolution.
The core-loss should have the background removed. To reduce
the noise amplication the result is convolved with a
Gaussian function.
Parameters
----------
ll: EELSSpectrum
The corresponding low-loss (ll) EELSSpectrum.
fwhm : float or None
Full-width half-maximum of the Gaussian function by which
the result of the deconvolution is convolved. It can be
used to select the final SNR and spectral resolution. If
None, the FWHM of the zero-loss peak of the low-loss is
estimated and used.
threshold : {None, float}
Truncation energy to estimate the intensity of the
elastic scattering. If None the threshold is taken as the
first minimum after the ZLP centre.
extrapolate_lowloss, extrapolate_coreloss : bool
If True, the signals are extrapolated using a power law.
Notes
-----
For details see: Egerton, R. Electron Energy-Loss
Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
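Examples
--------
Illustrative only; `cl` is assumed to be a background-subtracted core-loss
spectrum and `ll` the corresponding low-loss spectrum:
>>> cl_deconv = cl.fourier_ratio_deconvolution(ll)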
"""
self._check_signal_dimension_equals_one()
orig_cl_size = self.axes_manager.signal_axes[0].size
if threshold is None:
threshold = ll.estimate_elastic_scattering_threshold()
if extrapolate_coreloss is True:
cl = self.power_law_extrapolation(
window_size=20,
extrapolation_size=100)
else:
cl = self.deepcopy()
if extrapolate_lowloss is True:
ll = ll.power_law_extrapolation(
window_size=100,
extrapolation_size=100)
else:
ll = ll.deepcopy()
ll.hanning_taper()
cl.hanning_taper()
ll_size = ll.axes_manager.signal_axes[0].size
cl_size = self.axes_manager.signal_axes[0].size
# Conservative new size to solve the wrap-around problem
size = ll_size + cl_size - 1
# Increase to the closest power of two to enhance the FFT
# performance
size = int(2 ** np.ceil(np.log2(size)))
axis = ll.axes_manager.signal_axes[0]
if fwhm is None:
fwhm = float(ll.get_current_signal().estimate_peak_width()())
print("FWHM = %1.2f" % fwhm)
I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)
I0 = I0.data
if ll.axes_manager.navigation_size > 0:
I0_shape = list(I0.shape)
I0_shape.insert(axis.index_in_array, 1)
I0 = I0.reshape(I0_shape)
from hyperspy.components import Gaussian
g = Gaussian()
g.sigma.value = fwhm / 2.3548
g.A.value = 1
g.centre.value = 0
zl = g.function(
np.linspace(axis.offset,
axis.offset + axis.scale * (size - 1),
size))
z = np.fft.rfft(zl)
jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array)
jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array)
zshape = [1, ] * len(cl.data.shape)
zshape[axis.index_in_array] = jk.shape[axis.index_in_array]
cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl,
axis=axis.index_in_array)
cl.data *= I0
cl.crop(-1, None, int(orig_cl_size))
cl.metadata.General.title = (self.metadata.General.title +
' after Fourier-ratio deconvolution')
if cl.tmp_parameters.has_item('filename'):
cl.tmp_parameters.filename = (
self.tmp_parameters.filename +
'after_fourier_ratio_deconvolution')
return cl
def richardson_lucy_deconvolution(self, psf, iterations=15,
mask=None):
"""1D Richardson-Lucy Poissonian deconvolution of
the spectrum by the given kernel.
Parameters
----------
iterations: int
Number of iterations of the deconvolution. Note that
increasing the value will increase the noise amplification.
psf: EELSSpectrum
It must have the same signal dimension as the current
spectrum and a spatial dimension of 0 or the same as the
current spectrum.
Notes
-----
For details on the algorithm see Gloter, A., A. Douiri,
M. Tence, and C. Colliex. “Improving Energy Resolution of
EELS Spectra: An Alternative to the Monochromator Solution.”
Ultramicroscopy 96, no. 3–4 (September 2003): 385–400.
"""
self._check_signal_dimension_equals_one()
ds = self.deepcopy()
ds.data = ds.data.copy()
ds.metadata.General.title += (
' after Richardson-Lucy deconvolution %i iterations' %
iterations)
if ds.tmp_parameters.has_item('filename'):
ds.tmp_parameters.filename += (
'_after_R-L_deconvolution_%iiter' % iterations)
psf_size = psf.axes_manager.signal_axes[0].size
kernel = psf()
imax = kernel.argmax()
j = 0
maxval = self.axes_manager.navigation_size
if maxval > 0:
pbar = progressbar(maxval=maxval)
for D in self:
D = D.data.copy()
if psf.axes_manager.navigation_dimension != 0:
kernel = psf(axes_manager=self.axes_manager)
imax = kernel.argmax()
s = ds(axes_manager=self.axes_manager)
mimax = psf_size - 1 - imax
O = D.copy()
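# Richardson-Lucy update: O <- O * (K_mirrored (*) (D / (K (*) O))),
# where K is the PSF kernel, D the measured data and (*) convolution;
# the imax/mimax offsets keep the central, same-sized part of each result.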
for i in xrange(iterations):
first = np.convolve(kernel, O)[imax: imax + psf_size]
O = O * (np.convolve(kernel[::-1],
D / first)[mimax: mimax + psf_size])
s[:] = O
j += 1
if maxval > 0:
pbar.update(j)
if maxval > 0:
pbar.finish()
return ds
def _are_microscope_parameters_missing(self):
"""Check if the EELS parameters necessary to calculate the GOS
are defined in metadata. If not, in interactive mode
raises a UI item to fill in the values"""
must_exist = (
'Acquisition_instrument.TEM.convergence_angle',
'Acquisition_instrument.TEM.beam_energy',
'Acquisition_instrument.TEM.Detector.EELS.collection_angle',)
missing_parameters = []
for item in must_exist:
exists = self.metadata.has_item(item)
if exists is False:
missing_parameters.append(item)
if missing_parameters:
if preferences.General.interactive is True:
par_str = "The following parameters are missing:\n"
for par in missing_parameters:
par_str += '%s\n' % par
par_str += 'Please set them in the following wizard'
is_ok = messagesui.information(par_str)
if is_ok:
self._set_microscope_parameters()
else:
return True
else:
return True
else:
return False
def set_microscope_parameters(self,
beam_energy=None,
convergence_angle=None,
collection_angle=None):
"""Set the microscope parameters that are necessary to calculate
the GOS.
If not all of them are defined, in interactive mode a UI item is
raised to fill in the values.
Parameters
----------
beam_energy: float
The energy of the electron beam in keV
convergence_angle : float
In mrad.
collection_angle : float
In mrad.
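Examples
--------
Illustrative only; the numerical values are placeholders:
>>> s.set_microscope_parameters(beam_energy=300., convergence_angle=10., collection_angle=20.)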
"""
mp = self.metadata
if beam_energy is not None:
mp.set_item("Acquisition_instrument.TEM.beam_energy", beam_energy)
if convergence_angle is not None:
mp.set_item(
"Acquisition_instrument.TEM.convergence_angle",
convergence_angle)
if collection_angle is not None:
mp.set_item(
"Acquisition_instrument.TEM.Detector.EELS.collection_angle",
collection_angle)
self._are_microscope_parameters_missing()
@only_interactive
def _set_microscope_parameters(self):
tem_par = TEMParametersUI()
mapping = {
'Acquisition_instrument.TEM.convergence_angle': 'tem_par.convergence_angle',
'Acquisition_instrument.TEM.beam_energy': 'tem_par.beam_energy',
'Acquisition_instrument.TEM.Detector.EELS.collection_angle': 'tem_par.collection_angle', }
for key, value in mapping.iteritems():
if self.metadata.has_item(key):
exec('%s = self.metadata.%s' % (value, key))
tem_par.edit_traits()
mapping = {
'Acquisition_instrument.TEM.convergence_angle': tem_par.convergence_angle,
'Acquisition_instrument.TEM.beam_energy': tem_par.beam_energy,
'Acquisition_instrument.TEM.Detector.EELS.collection_angle': tem_par.collection_angle, }
for key, value in mapping.iteritems():
if value != t.Undefined:
self.metadata.set_item(key, value)
self._are_microscope_parameters_missing()
def power_law_extrapolation(self,
window_size=20,
extrapolation_size=1024,
add_noise=False,
fix_neg_r=False):
"""Extrapolate the spectrum to the right using a powerlaw
Parameters
----------
window_size : int
The number of channels from the right side of the
spectrum that are used to estimate the power law
parameters.
extrapolation_size : int
Size of the extrapolation in number of channels
add_noise : bool
If True, add poissonian noise to the extrapolated spectrum.
fix_neg_r : bool
If True, the negative values for the "components.PowerLaw"
parameter r will be flagged and the extrapolation will be
done with a constant zero-value.
Returns
-------
A new spectrum, with the extrapolation.
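Examples
--------
Illustrative only, using a synthetic decaying spectrum:
>>> s = signals.EELSSpectrum((np.arange(200.)[::-1] + 1.) ** 2)
>>> s_ext = s.power_law_extrapolation(window_size=20, extrapolation_size=100)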
"""
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
s = self.deepcopy()
s.metadata.General.title += (
' %i channels extrapolated' %
extrapolation_size)
if s.tmp_parameters.has_item('filename'):
s.tmp_parameters.filename += (
'_%i_channels_extrapolated' % extrapolation_size)
new_shape = list(self.data.shape)
new_shape[axis.index_in_array] += extrapolation_size
s.data = np.zeros((new_shape))
s.get_dimensions_from_data()
s.data[..., :axis.size] = self.data
pl = PowerLaw()
pl._axes_manager = self.axes_manager
pl.estimate_parameters(
s, axis.index2value(axis.size - window_size),
axis.index2value(axis.size - 1))
if fix_neg_r is True:
_r = pl.r.map['values']
_A = pl.A.map['values']
_A[_r <= 0] = 0
pl.A.map['values'] = _A
# If the signal is binned we need to bin the extrapolated power law
# what, in a first approximation, can be done by multiplying by the
# axis step size.
if self.metadata.Signal.binned is True:
factor = s.axes_manager[-1].scale
else:
factor = 1
s.data[..., axis.size:] = (
factor * pl.A.map['values'][..., np.newaxis] *
s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size:] ** (
-pl.r.map['values'][..., np.newaxis]))
return s
def kramers_kronig_analysis(self,
zlp=None,
iterations=1,
n=None,
t=None,
delta=0.5,
full_output=False):
"""Calculate the complex
dielectric function from a single scattering distribution (SSD) using
the Kramers-Kronig relations.
It uses the FFT method as in [Egerton2011]_. The SSD is an
EELSSpectrum instance containing SSD low-loss EELS with no zero-loss
peak. The internal loop is devised to approximately subtract the
surface plasmon contribution supposing an unoxidized planar surface and
neglecting coupling between the surfaces. This method does not account
for retardation effects, instrumental broadening and surface plasmon
excitation in particles.
Note that either refractive index or thickness are required.
If both are None or if both are provided an exception is raised.
Parameters
----------
zlp: {None, number, Signal}
ZLP intensity. It is optional (can be None) if `t` is None and `n`
is not None and the thickness estimation is not required. If `t`
is not None, the ZLP is required to perform the normalization and
if `n` is not None, the ZLP is required to calculate the thickness.
If the ZLP is the same for all spectra, the integral of the ZLP
can be provided as a number. Otherwise, if the ZLP intensity is not
the same for all spectra, it can be provided as i) a Signal
of the same dimensions as the current signal containing the ZLP
spectra for each location ii) a Signal of signal dimension 0
and navigation_dimension equal to the current signal containing the
integrated ZLP intensity.
iterations: int
Number of the iterations for the internal loop to remove the
surface plasmon contribution. If 1 the surface plasmon contribution
is not estimated and subtracted (the default is 1).
n: {None, float}
The medium refractive index. Used for normalization of the
SSD to obtain the energy loss function. If given the thickness
is estimated and returned. It is only required when `t` is None.
t: {None, number, Signal}
The sample thickness in nm. Used for normalization of the
SSD to obtain the energy loss function. It is only required when
`n` is None. If the thickness is the same for all spectra it can be
given by a number. Otherwise, it can be provided as a Signal with
signal dimension 0 and navigation_dimension equal to the current
signal.
delta : float
A small number (0.1-0.5 eV) added to the energy axis in
specific steps of the calculation of the surface loss correction to
improve stability.
full_output : bool
If True, return a dictionary that contains the estimated
thickness if `t` is None and the estimated surface plasmon
excitation and the spectrum corrected from surface plasmon
excitations if `iterations` > 1.
Returns
-------
eps: DielectricFunction instance
The complex dielectric function results,
$\epsilon = \epsilon_1 + i*\epsilon_2$,
contained in an DielectricFunction instance.
output: Dictionary (optional)
A dictionary of optional outputs with the following keys:
``thickness``
The estimated thickness in nm calculated by normalization of
the SSD (only when `t` is None)
``surface plasmon estimation``
The estimated surface plasmon excitation (only if
`iterations` > 1.)
Raises
------
ValueError
If both `n` and `t` are undefined (None).
AttributeError
If the beam_energy or the collection angle are not defined in
metadata.
Notes
-----
This method is based in Egerton's Matlab code [Egerton2011]_ with some
minor differences:
* The integrals are performed using the Simpson rule instead of using
a summation.
* The wrap-around problem when computing the FFTs is worked around by
padding the signal instead of subtracting the reflected tail.
.. [Egerton2011] Ray Egerton, "Electron Energy-Loss
Spectroscopy in the Electron Microscope", Springer-Verlag, 2011.
"""
output = {}
if iterations == 1:
# In this case s.data is not modified so there is no need to make
# a deep copy.
s = self.isig[0.:]
else:
s = self.isig[0.:].deepcopy()
sorig = self.isig[0.:]
# Avoid singularity at 0
if s.axes_manager.signal_axes[0].axis[0] == 0:
s = s.isig[1:]
sorig = self.isig[1:]
# Constants and units
me = constants.value(
'electron mass energy equivalent in MeV') * 1e3 # keV
# Mapped parameters
try:
e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
except:
raise AttributeError("Please define the beam energy."
"You can do this e.g. by using the "
"set_microscope_parameters method")
try:
beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle
except:
raise AttributeError("Please define the collection angle."
"You can do this e.g. by using the "
"set_microscope_parameters method")
axis = s.axes_manager.signal_axes[0]
eaxis = axis.axis.copy()
if isinstance(zlp, hyperspy.signal.Signal):
if (zlp.axes_manager.navigation_dimension ==
self.axes_manager.navigation_dimension):
if zlp.axes_manager.signal_dimension == 0:
i0 = zlp.data
else:
i0 = zlp.integrate1D(axis.index_in_axes_manager).data
else:
raise ValueError('The ZLP signal dimensions are not '
'compatible with the dimensions of the '
'low-loss signal')
i0 = i0.reshape(
np.insert(i0.shape, axis.index_in_array, 1))
elif isinstance(zlp, numbers.Number):
i0 = zlp
else:
raise ValueError('The zero-loss peak input is not valid.')
if isinstance(t, hyperspy.signal.Signal):
if (t.axes_manager.navigation_dimension ==
self.axes_manager.navigation_dimension) and (
t.axes_manager.signal_dimension == 0):
t = t.data
t = t.reshape(
np.insert(t.shape, axis.index_in_array, 1))
else:
raise ValueError('The thickness signal dimensions are not '
'compatible with the dimensions of the '
'low-loss signal')
elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):
raise ValueError("thickness must be a HyperSpy signal or a number,"
" not a numpy array.")
# Slicer to get the signal data from 0 to axis.size
slicer = s.axes_manager._get_data_slice(
[(axis.index_in_array, slice(None, axis.size)), ])
# Kinetic definitions
ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2
tgt = e0 * (2 * me + e0) / (me + e0)
rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)
for io in range(iterations):
# Calculation of the ELF by normalization of the SSD
# Norm(SSD) = Imag(-1/epsilon) (Energy Loss Function, ELF)
# We start by the "angular corrections"
Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale
if n is None and t is None:
raise ValueError("The thickness and the refractive index are "
"not defined. Please provide one of them.")
elif n is not None and t is not None:
raise ValueError("Please provide the refractive index OR the "
"thickness information, not both")
elif n is not None:
# normalize using the refractive index.
K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale
K = (K / (np.pi / 2) / (1 - 1. / n ** 2)).reshape(
np.insert(K.shape, axis.index_in_array, 1))
# Calculate the thickness only if possible and required
if zlp is not None and (full_output is True or
iterations > 1):
te = (332.5 * K * ke / i0)
if full_output is True:
output['thickness'] = te
elif t is not None:
if zlp is None:
raise ValueError("The ZLP must be provided when the "
"thickness is used for normalization.")
# normalize using the thickness
K = t * i0 / (332.5 * ke)
te = t
Im = Im / K
# Kramers Kronig Transform:
# We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT
# Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490
# Use a size that is a power of two to speed up the fft and
# make it double the closest upper value to workaround the
# wrap-around problem.
esize = 2 * closest_power_of_two(axis.size)
q = -2 * np.fft.fft(Im, esize,
axis.index_in_array).imag / esize
q[slicer] *= -1
q = np.fft.fft(q, axis=axis.index_in_array)
# Final touch, we have Re(1/eps)
Re = q[slicer].real + 1
# Egerton does this to correct the wrap-around problem, but in our
# case this is not necessary because we compute the fft on an
# extended and padded spectrum to avoid this problem.
# Re=real(q)
# Tail correction
# vm=Re[axis.size-1]
# Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /
# (axis.size*2-arange(0,axis.size-1)))**2)
# Re[axis.size:]=1+(0.5*vm*((axis.size-1) /
# (axis.size+arange(0,axis.size)))**2)
# Epsilon appears:
# We calculate the real and imaginary parts of the CDF
e1 = Re / (Re ** 2 + Im ** 2)
e2 = Im / (Re ** 2 + Im ** 2)
if iterations > 1 and zlp is not None:
# Surface losses correction:
# Calculates the surface ELF from a vacuum border effect
# A simulated surface plasmon is subtracted from the ELF
Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im
adep = (tgt / (eaxis + delta) *
np.arctan(beta * tgt / axis.axis) -
beta / 1000. /
(beta ** 2 + axis.axis ** 2. / tgt ** 2))
Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale
s.data = sorig.data - Srfint
print 'Iteration number: ', io + 1, '/', iterations
if iterations == io + 1 and full_output is True:
sp = sorig._deepcopy_with_new_data(Srfint)
sp.metadata.General.title += (
" estimated surface plasmon excitation.")
output['surface plasmon estimation'] = sp
del sp
del Srfint
eps = s._deepcopy_with_new_data(e1 + e2 * 1j)
del s
eps.set_signal_type("DielectricFunction")
eps.metadata.General.title = (self.metadata.General.title +
'dielectric function '
'(from Kramers-Kronig analysis)')
if eps.tmp_parameters.has_item('filename'):
eps.tmp_parameters.filename = (
self.tmp_parameters.filename +
'_CDF_after_Kramers_Kronig_transform')
if 'thickness' in output:
thickness = eps._get_navigation_signal()
thickness.metadata.General.title = (
self.metadata.General.title + ' thickness '
'(calculated using Kramers-Kronig analysis)')
thickness.data = te[
self.axes_manager._get_data_slice([(
axis.index_in_array, 0)])]
output['thickness'] = thickness
if full_output is False:
return eps
else:
return eps, output
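# Hedged usage sketch for kramers_kronig_analysis (names are illustrative, not
# taken from this file). Given a low-loss spectrum `s` with the zero-loss peak
# removed and its ZLP in `zl`:
#
#     >>> eps = s.kramers_kronig_analysis(zlp=zl, n=1.0)
#     >>> eps, out = s.kramers_kronig_analysis(zlp=zl, n=1.0, full_output=True)
#     >>> out['thickness']   # thickness estimate, returned because `n` was given
#
# Exactly one of `n` (refractive index) or `t` (thickness) must be provided,
# otherwise the ValueError documented above is raised.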
| pburdet/hyperspy | hyperspy/_signals/eels.py | Python | gpl-3.0 | 48,505 | 0.000309 |
__author__ = 'Nathen'
# get min and max bounds
a, b = map(int, input('Nums: ').split(' '))
# build a list of all odd nums in [a, b]
nums = [x for x in range(a, b + 1) if x % 2 == 1]
# sum nums
ans = sum(nums)
# print answer
print(ans) | nadrees/PyRosalind | Python Village/INI4.py | Python | unlicense | 243 | 0.004115 |
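# Illustrative run (made-up input): entering "100 200" at the prompt sums the
# odd integers 101, 103, ..., 199 and prints 7500.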
"""
A nearly direct translation of Andrej's code
https://github.com/karpathy/char-rnn
"""
from __future__ import division
import cgt
from cgt import nn, utils, profiler
import numpy as np, numpy.random as nr
import os.path as osp
import argparse
from time import time
from StringIO import StringIO
from param_collection import ParamCollection
# via https://github.com/karpathy/char-rnn/blob/master/model/GRU.lua
# via http://arxiv.org/pdf/1412.3555v1.pdf
def make_deep_gru(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix() for i_layer in xrange(n_layers+1)]
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer+1] # note that inputs[0] is the external input, so we add 1
x = inputs[0] if i_layer==0 else outputs[i_layer-1]
size_x = size_input if i_layer==0 else size_mem
update_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2u")(x)
+ nn.Affine(size_mem, size_mem, name="h2u")(prev_h))
reset_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2r")(x)
+ nn.Affine(size_mem, size_mem, name="h2r")(prev_h))
gated_hidden = reset_gate * prev_h
p2 = nn.Affine(size_mem, size_mem)(gated_hidden)
p1 = nn.Affine(size_x, size_mem)(x)
hidden_target = cgt.tanh(p1+p2)
next_h = (1.0-update_gate)*prev_h + update_gate*hidden_target
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output,name="pred")(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
def make_deep_lstm(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix(fixed_shape=(size_batch, size_input))]
for _ in xrange(2*n_layers):
inputs.append(cgt.matrix(fixed_shape=(size_batch, size_mem)))
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer*2]
prev_c = inputs[i_layer*2+1]
if i_layer==0:
x = inputs[0]
size_x = size_input
else:
x = outputs[(i_layer-1)*2]
size_x = size_mem
input_sums = nn.Affine(size_x, 4*size_mem)(x) + nn.Affine(size_x, 4*size_mem)(prev_h)
sigmoid_chunk = cgt.sigmoid(input_sums[:,0:3*size_mem])
in_gate = sigmoid_chunk[:,0:size_mem]
forget_gate = sigmoid_chunk[:,size_mem:2*size_mem]
out_gate = sigmoid_chunk[:,2*size_mem:3*size_mem]
in_transform = cgt.tanh(input_sums[:,3*size_mem:4*size_mem])
next_c = forget_gate*prev_c + in_gate * in_transform
next_h = out_gate*cgt.tanh(next_c)
outputs.append(next_c)
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output)(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
def flatcat(xs):
return cgt.concatenate([x.flatten() for x in xs])
def cat_sample(ps):
"""
sample from categorical distribution
ps is a 2D array whose rows are vectors of probabilities
"""
r = nr.rand(len(ps))
out = np.zeros(len(ps),dtype='i4')
cumsums = np.cumsum(ps, axis=1)
for (irow,csrow) in enumerate(cumsums):
for (icol, csel) in enumerate(csrow):
if csel > r[irow]:
out[irow] = icol
break
return out
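# Illustrative example (made-up probabilities): with
#     ps = np.array([[0.1, 0.9], [0.7, 0.3]])
# cat_sample(ps) draws one column index per row, so a typical result is
# array([1, 0]): row 0 usually selects index 1, row 1 usually index 0.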
def rmsprop_update(grad, state):
state.sqgrad[:] *= state.decay_rate
state.count *= state.decay_rate
np.square(grad, out=state.scratch) # scratch=g^2
state.sqgrad += state.scratch
state.count += 1
np.sqrt(state.sqgrad, out=state.scratch) # scratch = sum of squares
np.divide(state.scratch, np.sqrt(state.count), out=state.scratch) # scratch = rms
np.divide(grad, state.scratch, out=state.scratch) # scratch = grad/rms
np.multiply(state.scratch, state.step_size, out=state.scratch)
state.theta[:] -= state.scratch
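# In effect the update above is plain RMSProp (hedged paraphrase of the code,
# no extra behaviour implied):
#
#     sqgrad <- decay_rate * sqgrad + grad**2
#     count  <- decay_rate * count + 1
#     theta  <- theta - step_size * grad / sqrt(sqgrad / count)
#
# i.e. the running `count` plays the role of the usual (1 - decay_rate)
# normalisation of the squared-gradient average.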
def make_loss_and_grad_and_step(arch, size_input, size_output, size_mem, size_batch, n_layers, n_unroll):
# symbolic variables
x_tnk = cgt.tensor3()
targ_tnk = cgt.tensor3()
make_network = make_deep_lstm if arch=="lstm" else make_deep_gru
network = make_network(size_input, size_mem, n_layers, size_output, size_batch)
init_hiddens = [cgt.matrix() for _ in xrange(get_num_hiddens(arch, n_layers))]
# TODO fixed sizes
cur_hiddens = init_hiddens
loss = 0
for t in xrange(n_unroll):
outputs = network([x_tnk[t]] + cur_hiddens)
cur_hiddens, prediction_logprobs = outputs[:-1], outputs[-1]
# loss = loss + nn.categorical_negloglik(prediction_probs, targ_tnk[t]).sum()
loss = loss - (prediction_logprobs*targ_tnk[t]).sum()
cur_hiddens = outputs[:-1]
final_hiddens = cur_hiddens
loss = loss / (n_unroll * size_batch)
params = network.get_parameters()
gradloss = cgt.grad(loss, params)
flatgrad = flatcat(gradloss)
with utils.Message("compiling loss+grad"):
f_loss_and_grad = cgt.function([x_tnk, targ_tnk] + init_hiddens, [loss, flatgrad] + final_hiddens)
f_loss = cgt.function([x_tnk, targ_tnk] + init_hiddens, loss)
assert len(init_hiddens) == len(final_hiddens)
x_nk = cgt.matrix('x')
outputs = network([x_nk] + init_hiddens)
f_step = cgt.function([x_nk]+init_hiddens, outputs)
# print "node count", cgt.count_nodes(flatgrad)
return network, f_loss, f_loss_and_grad, f_step
class Table(dict):
"dictionary-like object that exposes its keys as attributes"
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
def make_rmsprop_state(theta, step_size, decay_rate):
return Table(theta=theta, sqgrad=np.zeros_like(theta)+1e-6, scratch=np.empty_like(theta),
step_size=step_size, decay_rate=decay_rate, count=0)
class Loader(object):
def __init__(self, data_dir, size_batch, n_unroll, split_fractions):
input_file = osp.join(data_dir,"input.txt")
preproc_file = osp.join(data_dir, "preproc.npz")
run_preproc = not osp.exists(preproc_file) or osp.getmtime(input_file) > osp.getmtime(preproc_file)
if run_preproc:
text_to_tensor(input_file, preproc_file)
data_file = np.load(preproc_file)
self.char2ind = {char:ind for (ind,char) in enumerate(data_file["chars"])}
data = data_file["inds"]
data = data[:data.shape[0] - (data.shape[0] % size_batch)].reshape(size_batch, -1).T # inds_tn
n_batches = (data.shape[0]-1) // n_unroll
data = data[:n_batches*n_unroll+1] # now t-1 is divisble by batch size
self.n_unroll = n_unroll
self.data = data
self.n_train_batches = int(n_batches*split_fractions[0])
self.n_test_batches = int(n_batches*split_fractions[1])
self.n_val_batches = n_batches - self.n_train_batches - self.n_test_batches
print "%i train batches, %i test batches, %i val batches"%(self.n_train_batches, self.n_test_batches, self.n_val_batches)
@property
def size_vocab(self):
return len(self.char2ind)
def train_batches_iter(self):
for i in xrange(self.n_train_batches):
start = i*self.n_unroll
stop = (i+1)*self.n_unroll
yield ind2onehot(self.data[start:stop], self.size_vocab), ind2onehot(self.data[start+1:stop+1], self.size_vocab) # XXX
# XXX move elsewhere
def ind2onehot(inds, n_cls):
inds = np.asarray(inds)
out = np.zeros(inds.shape+(n_cls,),cgt.floatX)
out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
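# Illustrative example: ind2onehot([2, 0], 3) returns
#     [[0., 0., 1.],
#      [1., 0., 0.]]
# and any higher-dimensional index array simply gains a trailing axis of
# length n_cls.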
def text_to_tensor(text_file, preproc_file):
with open(text_file,"r") as fh:
text = fh.read()
char2ind = {}
inds = []
for char in text:
ind = char2ind.get(char, -1)
if ind == -1:
ind = len(char2ind)
char2ind[char] = ind
inds.append(ind)
np.savez(preproc_file, inds = inds, chars = sorted(char2ind, key = lambda char : char2ind[char]))
def get_num_hiddens(arch, n_layers):
return {"lstm" : 2 * n_layers, "gru" : n_layers}[arch]
def sample(f_step, init_hiddens, char2ind, n_steps, temperature, seed_text = ""):
vocab_size = len(char2ind)
ind2char = {ind:char for (char,ind) in char2ind.iteritems()}
cur_hiddens = init_hiddens
t = StringIO()
t.write(seed_text)
for char in seed_text:
x_1k = ind2onehot([char2ind[char]], vocab_size)
net_outputs = f_step(x_1k, *cur_hiddens)
cur_hiddens, logprobs_1k = net_outputs[:-1], net_outputs[-1]
if len(seed_text)==0:
logprobs_1k = np.zeros((1,vocab_size))
for _ in xrange(n_steps):
logprobs_1k /= temperature
probs_1k = np.exp(logprobs_1k*2)
probs_1k /= probs_1k.sum()
index = cat_sample(probs_1k)[0]
char = ind2char[index]
x_1k = ind2onehot([index], vocab_size)
net_outputs = f_step(x_1k, *cur_hiddens)
cur_hiddens, logprobs_1k = net_outputs[:-1], net_outputs[-1]
t.write(char)
cgt.utils.colorprint(cgt.utils.Color.YELLOW, t.getvalue() + "\n")
def main():
nr.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="alice")
parser.add_argument("--size_mem", type=int,default=64)
parser.add_argument("--size_batch", type=int,default=64)
parser.add_argument("--n_layers",type=int,default=2)
parser.add_argument("--n_unroll",type=int,default=16)
parser.add_argument("--step_size",type=float,default=.01)
parser.add_argument("--decay_rate",type=float,default=0.95)
parser.add_argument("--n_epochs",type=int,default=20)
parser.add_argument("--arch",choices=["lstm","gru"],default="lstm")
parser.add_argument("--grad_check",action="store_true")
parser.add_argument("--profile",action="store_true")
parser.add_argument("--unittest",action="store_true")
parser.add_argument("--temperature",type=float,default=1)
args = parser.parse_args()
cgt.set_precision("quad" if args.grad_check else "single")
assert args.n_unroll > 1
loader = Loader(args.data_dir,args.size_batch, args.n_unroll, (1.0,0,0))
network, f_loss, f_loss_and_grad, f_step = make_loss_and_grad_and_step(args.arch, loader.size_vocab,
loader.size_vocab, args.size_mem, args.size_batch, args.n_layers, args.n_unroll)
if args.profile: profiler.start()
params = network.get_parameters()
pc = ParamCollection(params)
pc.set_value_flat(nr.uniform(-.1, .1, size=(pc.get_total_size(),)))
def initialize_hiddens(n):
return [np.zeros((n, args.size_mem), cgt.floatX) for _ in xrange(get_num_hiddens(args.arch, args.n_layers))]
if args.grad_check :
x,y = loader.train_batches_iter().next()
prev_hiddens = initialize_hiddens(args.size_batch)
def f(thnew):
thold = pc.get_value_flat()
pc.set_value_flat(thnew)
loss = f_loss(x,y, *prev_hiddens)
pc.set_value_flat(thold)
return loss
from cgt.numeric_diff import numeric_grad
g_num = numeric_grad(f, pc.get_value_flat(), eps=1e-10)
result = f_loss_and_grad(x,y,*prev_hiddens)
g_anal = result[1]
assert np.allclose(g_num, g_anal, atol=1e-4)
print "Gradient check succeeded!"
return
optim_state = make_rmsprop_state(theta=pc.get_value_flat(), step_size = args.step_size,
decay_rate = args.decay_rate)
for iepoch in xrange(args.n_epochs):
losses = []
tstart = time()
print "starting epoch",iepoch
cur_hiddens = initialize_hiddens(args.size_batch)
for (x,y) in loader.train_batches_iter():
out = f_loss_and_grad(x,y, *cur_hiddens)
loss = out[0]
grad = out[1]
cur_hiddens = out[2:]
rmsprop_update(grad, optim_state)
pc.set_value_flat(optim_state.theta)
losses.append(loss)
if args.unittest: return
print "%.3f s/batch. avg loss = %.3f"%((time()-tstart)/len(losses), np.mean(losses))
optim_state.step_size *= .98 #pylint: disable=E1101
sample(f_step, initialize_hiddens(1), char2ind=loader.char2ind, n_steps=1000, temperature=args.temperature, seed_text = "")
if args.profile: profiler.print_stats()
if __name__ == "__main__":
main()
| rohanraja/cgt_distributed | examples/char.py | Python | mit | 12,474 | 0.010742 |
"""
components.verisure
~~~~~~~~~~~~~~~~~~~
Provides support for verisure components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/verisure/
"""
import logging
import time
from datetime import timedelta
from homeassistant import bootstrap
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, CONF_PASSWORD, CONF_USERNAME,
EVENT_PLATFORM_DISCOVERED)
from homeassistant.helpers import validate_config
from homeassistant.loader import get_component
from homeassistant.util import Throttle
DOMAIN = "verisure"
DISCOVER_SENSORS = 'verisure.sensors'
DISCOVER_SWITCHES = 'verisure.switches'
DISCOVER_ALARMS = 'verisure.alarm_control_panel'
DISCOVER_LOCKS = 'verisure.lock'
DEPENDENCIES = ['alarm_control_panel']
REQUIREMENTS = ['vsure==0.5.1']
_LOGGER = logging.getLogger(__name__)
MY_PAGES = None
ALARM_STATUS = {}
SMARTPLUG_STATUS = {}
CLIMATE_STATUS = {}
LOCK_STATUS = {}
MOUSEDETECTION_STATUS = {}
VERISURE_LOGIN_ERROR = None
VERISURE_ERROR = None
SHOW_THERMOMETERS = True
SHOW_HYGROMETERS = True
SHOW_ALARM = True
SHOW_SMARTPLUGS = True
SHOW_LOCKS = True
SHOW_MOUSEDETECTION = True
CODE_DIGITS = 4
# if wrong password was given don't try again
WRONG_PASSWORD_GIVEN = False
MIN_TIME_BETWEEN_REQUESTS = timedelta(seconds=1)
def setup(hass, config):
""" Setup the Verisure component. """
if not validate_config(config,
{DOMAIN: [CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return False
from verisure import MyPages, LoginError, Error
global SHOW_THERMOMETERS, SHOW_HYGROMETERS,\
SHOW_ALARM, SHOW_SMARTPLUGS, SHOW_LOCKS, SHOW_MOUSEDETECTION,\
CODE_DIGITS
SHOW_THERMOMETERS = int(config[DOMAIN].get('thermometers', '1'))
SHOW_HYGROMETERS = int(config[DOMAIN].get('hygrometers', '1'))
SHOW_ALARM = int(config[DOMAIN].get('alarm', '1'))
SHOW_SMARTPLUGS = int(config[DOMAIN].get('smartplugs', '1'))
SHOW_LOCKS = int(config[DOMAIN].get('locks', '1'))
SHOW_MOUSEDETECTION = int(config[DOMAIN].get('mouse', '1'))
CODE_DIGITS = int(config[DOMAIN].get('code_digits', '4'))
global MY_PAGES
MY_PAGES = MyPages(
config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD])
global VERISURE_LOGIN_ERROR, VERISURE_ERROR
VERISURE_LOGIN_ERROR = LoginError
VERISURE_ERROR = Error
try:
MY_PAGES.login()
except (ConnectionError, Error) as ex:
_LOGGER.error('Could not log in to verisure mypages, %s', ex)
return False
update_alarm()
update_climate()
update_smartplug()
update_lock()
update_mousedetection()
# Load the components for the Verisure devices that we support
for comp_name, discovery in ((('sensor', DISCOVER_SENSORS),
('switch', DISCOVER_SWITCHES),
('alarm_control_panel', DISCOVER_ALARMS),
('lock', DISCOVER_LOCKS))):
component = get_component(comp_name)
_LOGGER.info(config[DOMAIN])
bootstrap.setup_component(hass, component.DOMAIN, config)
hass.bus.fire(EVENT_PLATFORM_DISCOVERED,
{ATTR_SERVICE: discovery,
ATTR_DISCOVERED: {}})
return True
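# A hedged sketch of the configuration this component expects (YAML shown
# inside a comment for illustration; the option names are the ones read in
# setup() above, the values are made up):
#
#     verisure:
#       username: user@example.com
#       password: secret
#       alarm: 1
#       thermometers: 1
#       hygrometers: 0
#       smartplugs: 1
#       locks: 1
#       mouse: 1
#       code_digits: 4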
def reconnect():
""" Reconnect to verisure mypages. """
try:
time.sleep(1)
MY_PAGES.login()
except VERISURE_LOGIN_ERROR as ex:
_LOGGER.error("Could not login to Verisure mypages, %s", ex)
global WRONG_PASSWORD_GIVEN
WRONG_PASSWORD_GIVEN = True
except (ConnectionError, VERISURE_ERROR) as ex:
_LOGGER.error("Could not login to Verisure mypages, %s", ex)
@Throttle(MIN_TIME_BETWEEN_REQUESTS)
def update_alarm():
""" Updates the status of alarms. """
update_component(MY_PAGES.alarm.get, ALARM_STATUS)
@Throttle(MIN_TIME_BETWEEN_REQUESTS)
def update_climate():
""" Updates the status of climate sensors. """
update_component(MY_PAGES.climate.get, CLIMATE_STATUS)
@Throttle(MIN_TIME_BETWEEN_REQUESTS)
def update_smartplug():
""" Updates the status of smartplugs. """
update_component(MY_PAGES.smartplug.get, SMARTPLUG_STATUS)
def update_lock():
""" Updates the status of alarms. """
update_component(MY_PAGES.lock.get, LOCK_STATUS)
def update_mousedetection():
""" Updates the status of mouse detectors. """
update_component(MY_PAGES.mousedetection.get, MOUSEDETECTION_STATUS)
def update_component(get_function, status):
""" Updates the status of verisure components. """
if WRONG_PASSWORD_GIVEN:
_LOGGER.error('Wrong password')
return
try:
for overview in get_function():
try:
status[overview.id] = overview
except AttributeError:
status[overview.deviceLabel] = overview
except (ConnectionError, VERISURE_ERROR) as ex:
_LOGGER.error('Caught connection error %s, tries to reconnect', ex)
reconnect()
| nnic/home-assistant | homeassistant/components/verisure.py | Python | mit | 5,074 | 0 |
#!/usr/bin/env python
# encoding: utf-8
"""
crawler.py
~~~~~~~~~~~~~
Main module: the concrete implementation of the crawler.
"""
import re
import time
import logging
import threading
import traceback
from hashlib import md5
from bs4 import BeautifulSoup
from datetime import datetime
from collections import deque
from locale import getdefaultlocale
from urlparse import urljoin,urlparse
from database import Database
from webPage import WebPage
from threadPool import ThreadPool
log = logging.getLogger('spider')
class Crawler(threading.Thread):
def __init__(self, args, queue):
threading.Thread.__init__(self)
# maximum crawl depth
self.depth = args['depth']
# current crawl depth, starting at 1
self.currentDepth = 1
# keyword, decoded with the console's default encoding
self.keyword = args['keyword'].decode(getdefaultlocale()[1])
# database
self.database = Database(db="bt_tornado")
# thread pool with the given number of threads
self.threadPool = ThreadPool(args['threadNum'])
# links already visited
self.visitedHrefs = set()
# links waiting to be visited
self.unvisitedHrefs = deque()
# add the start URLs to the unvisited queue
for url in args['url']:
self.unvisitedHrefs.append(url)
# flag marking whether the crawler has started working
self.isCrawling = False
# allow or deny crawl url
self.entryFilter = args['entryFilter']
# allow to output back url
self.yieldFilter = args['yieldFilter']
#
self.callbackFilter = args['callbackFilter']
#
self.db = args['db']
self.collection = args['collection']
# communication queue
self.queue = queue
def run(self):
print '\nStart Crawling\n'
if not self._isDatabaseAvaliable():
print 'Error: Unable to open database file.\n'
else:
self.isCrawling = True
self.threadPool.startThreads()
while self.currentDepth < self.depth+1:
# dispatch tasks: the thread pool downloads all pages of the current depth concurrently (non-blocking)
self._assignCurrentDepthTasks ()
# wait for the thread pool to finish all tasks; once the pool is empty, one crawl depth is done
# self.threadPool.taskJoin() could replace the loop below, but it cannot be interrupted with Ctrl-C
while self.threadPool.getTaskLeft():
time.sleep(8)
print 'Depth %d Finish. Totally visited %d links. \n' % (
self.currentDepth, len(self.visitedHrefs))
log.info('Depth %d Finish. Total visited Links: %d\n' % (
self.currentDepth, len(self.visitedHrefs)))
self.currentDepth += 1
self.stop()
def stop(self):
self.isCrawling = False
self.threadPool.stopThreads()
self.database.close()
#use queue to communicate between threads
self.queue.get()
self.queue.task_done()
def getAlreadyVisitedNum(self):
# visitedHrefs holds links already handed to the task queue; some may still be in progress.
# The real number of visited links is therefore len(visitedHrefs) minus the pending tasks.
return len(self.visitedHrefs) - self.threadPool.getTaskLeft()
def _assignCurrentDepthTasks(self):
while self.unvisitedHrefs:
url = self.unvisitedHrefs.popleft()
if not self.__entry_filter(url):
self.visitedHrefs.add(url)
continue
# hand the task to the task queue
self.threadPool.putTask(self._taskHandler, url)
# mark the link as visited (or about to be visited) to avoid fetching it twice
self.visitedHrefs.add(url)
def _callback_filter(self, webPage):
# parse the web page and invoke the registered callback for matching urls
url , pageSource = webPage.getDatas()
for tmp in self.callbackFilter['List']:
if re.compile(tmp,re.I|re.U).search(url):
self.callbackFilter['func'](webPage)
def _taskHandler(self, url):
# fetch the page source first, then save it; both are blocking operations, so they run in a worker thread
webPage = WebPage(url)
tmp = webPage.fetch()
if tmp:
self._callback_filter(webPage)
self._saveTaskResults(webPage)
self._addUnvisitedHrefs(webPage)
def _saveTaskResults(self, webPage):
url, pageSource = webPage.getDatas()
_id = md5(url).hexdigest()
try:
if self.__yield_filter(url):
query = {"id": _id}
document = {"id": _id, "url":url, "createTime": datetime.now()}
self.database.saveData(query=query, collection=self.collection, document=document)
except Exception, e:
log.error(' URL: %s ' % url + traceback.format_exc())
def _addUnvisitedHrefs(self, webPage):
'''Add unvisited links: put valid urls into the unvisitedHrefs queue.'''
# filter the links: 1. fetch only http/https pages; 2. make sure each link is visited only once
url, pageSource = webPage.getDatas()
hrefs = self._getAllHrefsFromPage(url, pageSource)
for href in hrefs:
if self._isHttpOrHttpsProtocol(href):
if not self._isHrefRepeated(href):
self.unvisitedHrefs.append(href)
def _getAllHrefsFromPage(self, url, pageSource):
'''Parse the html source and collect all links on the page. Returns a list of hrefs.'''
hrefs = []
soup = BeautifulSoup(pageSource)
results = soup.find_all('a',href=True)
for a in results:
# links must be encoded as utf8, because links to Chinese filenames such as http://aa.com/文件.pdf
# are not url-encoded automatically by bs4, which would otherwise raise an encoding exception
href = a.get('href').encode('utf8')
if not href.startswith('http'):
href = urljoin(url, href)  # resolve relative links
hrefs.append(href)
return hrefs
def _isHttpOrHttpsProtocol(self, href):
protocal = urlparse(href).scheme
if protocal == 'http' or protocal == 'https':
return True
return False
def _isHrefRepeated(self, href):
if href in self.visitedHrefs or href in self.unvisitedHrefs:
return True
return False
def _isDatabaseAvaliable(self):
if self.database.isConn():
return True
return False
def __entry_filter(self, checkURL):
'''
Entry filter.
Decides which urls the crawler is allowed to enter and fetch.
@param checkURL: the url handed to the filter for checking
@type checkURL: string
@return: True if the url passes the check, otherwise False
@rtype: bool
'''
# if a filter is defined, check against it
if self.entryFilter:
if self.entryFilter['Type'] == 'allow':  # allow mode: pass if any rule matches, otherwise deny
result = False
for rule in self.entryFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = True
break
return result
elif self.entryFilter['Type'] == 'deny':  # deny mode: reject if any rule matches, otherwise allow
result = True
for rule in self.entryFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = False
break
return result
# no filter defined: allow by default
return True
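# The filter dictionaries used above and below are expected to look roughly
# like this (a sketch inferred from the code, not a documented API):
#
#     entryFilter = {'Type': 'allow',                     # or 'deny'
#                    'List': [r'^https?://example\.com/']}
#
# 'allow' keeps only urls matching at least one regex in 'List';
# 'deny' drops urls matching any of them.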
def __yield_filter(self, checkURL):
'''
Yield filter.
Decides which urls the crawler may output.
@param checkURL: the url handed to the filter for checking
@type checkURL: string
@return: True if the url passes the check, otherwise False
@rtype: bool
'''
# if a filter is defined, check against it
if self.yieldFilter:
if self.yieldFilter['Type'] == 'allow':  # allow mode: pass if any rule matches, otherwise deny
result = False
for rule in self.yieldFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = True
break
return result
elif self.yieldFilter['Type'] == 'deny':  # deny mode: reject if any rule matches, otherwise allow
result = True
for rule in self.yieldFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = False
break
return result
# no filter defined: allow by default
return True
| zhkzyth/a-super-fast-crawler | crawler.py | Python | mit | 9,133 | 0.00478 |
# Copyright (c) 2016 kamyu. All rights reserved.
#
# Google Code Jam 2014 Round 1B - Problem A. The Repeater
# https://code.google.com/codejam/contest/2994486/dashboard#s=p0
#
# Time: O(X * N), N is the number of strings,
# X is the number of characters in the frequency string.
# Space: O(X * N)
#
from random import randint
def find_kth_largest(nums, k):
def partition_around_pivot(left, right, pivot_idx, nums):
pivot_value = nums[pivot_idx]
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if nums[i] > pivot_value:
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = randint(left, right)
new_pivot_idx = partition_around_pivot(left, right, pivot_idx, nums)
if new_pivot_idx == k - 1:
return nums[new_pivot_idx]
elif new_pivot_idx > k - 1:
right = new_pivot_idx - 1
else: # new_pivot_idx < k - 1.
left = new_pivot_idx + 1
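# Quick illustrative check (not part of the solution): with nums = [3, 1, 5, 2],
# find_kth_largest(nums, 2) returns 3, the 2nd largest element. Note that the
# helper partially reorders `nums` in place while selecting.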
def run_length_encoding(s):
encode_str = [[1, s[0]]]
for i in xrange(1, len(s)):
if s[i] != encode_str[-1][1]:
encode_str.append([1, s[i]])
else:
encode_str[-1][0] += 1
return encode_str
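# Illustrative example: run_length_encoding("aaabcc") -> [[3, 'a'], [1, 'b'], [2, 'c']]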
def the_repeater():
strs = []
for _ in xrange(input()):
strs.append(run_length_encoding(raw_input().strip()))
for s in strs:
if len(s) != len(strs[0]):
return "Fegla Won"
for i in xrange(len(s)):
if s[i][1] != strs[0][i][1]:
return "Fegla Won"
move = 0
for j in xrange(len(strs[0])): # X times.
freqs = [strs[i][j][0] for i in xrange(len(strs))] # N times.
# Median minimizes the sum of absolute deviations.
# freqs.sort() # O(NlogN)
# median = freqs[len(freqs)/2]
median = find_kth_largest(freqs, len(freqs)/2 + 1) # O(N) on average.
for freq in freqs:
move += abs(freq - median)
return move
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, the_repeater())
| kamyu104/GoogleCodeJam-2014 | Round 1B/the-repeater.py | Python | mit | 2,367 | 0.00169 |
#!/bin/env python
import itertools
import collections
def read_table(filename):
with open(filename) as fp:
header = next(fp).split()
rows = [line.split()[1:] for line in fp if line.strip()]
columns = zip(*rows)
data = dict(zip(header, columns))
return data
table = read_table("../../data/colldata.txt")
pots = sorted(table)
alphabet = "+-?"
for num in range(2, len(table) + 1):
for group in itertools.combinations(pots, num):
patterns = zip(*[table[p] for p in group])
counts = collections.Counter(patterns)
for poss in itertools.product(alphabet, repeat=num):
print ', '.join(group) + ':',
print ''.join(poss), counts[poss]
| ketancmaheshwari/hello-goog | src/python/collectionsexample.py | Python | apache-2.0 | 718 | 0.001393 |
from MA_version import version as __version__
from MA_version import version_info as __version_info__
from MA import *
| fxia22/ASM_xf | PythonD/site_python/Numeric/MA/__init__.py | Python | gpl-2.0 | 119 | 0 |
import requests
from django.shortcuts import render
# Create your views here.
from django.template.response import TemplateResponse
from account.models import OAuthToken
def list_azure_groups(request):
token = OAuthToken.objects.filter(user=request.user, service__name='microsoft').last()
if token is None:
return ''
r = requests.get('https://graph.microsoft.com/v1.0/groups',
headers={'Authorization': 'Bearer {}'.format(token.auth_token)})
return TemplateResponse(request, 'permissions/azure.html', context={'groups': r.json()['value']})
| BdEINSALyon/resa | permissions/views.py | Python | gpl-3.0 | 589 | 0.005093 |
from auditlog.models import LogEntry
from django.contrib import admin
admin.site.unregister(LogEntry)
| OWASP/django-DefectDojo | dojo/admin.py | Python | bsd-3-clause | 103 | 0 |
import caffe
import surgery, score
import numpy as np
import os
import sys
import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))
weights = '../ilsvrc-nets/vgg16-fcn.caffemodel'
# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()
solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)
# surgeries
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
# scoring
val = np.loadtxt('../data/nyud/test.txt', dtype=str)  # held-out set passed to seg_tests below
for _ in range(50):
solver.step(2000)
score.seg_tests(solver, False, val, layer='score')
| wkentaro/fcn | fcn/external/fcn.berkeleyvision.org/nyud-fcn32s-hha/solve.py | Python | mit | 616 | 0.001623 |
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfp_server_api
from kfp_server_api.models.api_run_storage_state import ApiRunStorageState # noqa: E501
from kfp_server_api.rest import ApiException
class TestApiRunStorageState(unittest.TestCase):
"""ApiRunStorageState unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ApiRunStorageState
include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kfp_server_api.models.api_run_storage_state.ApiRunStorageState() # noqa: E501
if include_optional :
return ApiRunStorageState(
)
else :
return ApiRunStorageState(
)
def testApiRunStorageState(self):
"""Test ApiRunStorageState"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| kubeflow/pipelines | backend/api/python_http_client/test/test_api_run_storage_state.py | Python | apache-2.0 | 1,436 | 0.003482 |
"""Tests for the simple rate limiting classes of the ``unshorten`` app."""
from mock import Mock
from django.conf import settings
from django.test import TestCase
from mixer.backend.django import mixer
from ..backend import RateLimit
from ..models import APICallDayHistory
class SimpleRateLimitTestCase(TestCase):
"""Tests for the ``SimpleRateLimit`` class."""
longMessage = True
def setUp(self):
self.history = mixer.blend('unshorten.APICallDayHistory',
amount_api_calls=2500)
self.request = Mock(user=self.history.user)
def test_is_rate_limit_exceeded(self):
"""Test for the ``is_rate_limit_exceeded`` method."""
rate_limit = RateLimit(self.request)
self.assertEqual(rate_limit.is_rate_limit_exceeded(), False, msg=(
'Rate limit should not be exceeded.'))
rate_limit = RateLimit(self.request)
self.history.amount_api_calls = settings.UNSHORTEN_DAILY_LIMIT
self.history.save()
self.assertEqual(rate_limit.is_rate_limit_exceeded(), True, msg=(
'Rate limit should be exceeded.'))
rate_limit = RateLimit(self.request)
self.history.delete()
self.assertEqual(rate_limit.is_rate_limit_exceeded(), False, msg=(
'Rate limit should not be exceeded if no history is logged.'))
def test_log_api_call(self):
"""Test for the ``log_api_call`` method."""
rate_limit = RateLimit(self.request)
history = rate_limit.log_api_call()
self.assertEqual(APICallDayHistory.objects.all().count(), 1, msg=(
'Should create a APICallDayHistory object.'))
self.assertEqual(
history.amount_api_calls, self.history.amount_api_calls + 1, msg=(
'The amount of api calls should have increased.'))
| bitmazk/django-unshorten | unshorten/tests/rate_limit_tests.py | Python | mit | 1,841 | 0 |
from utils import scaleToZoom
def jsonScript(layer):
json = """
<script src="data/json_{layer}.js\"></script>""".format(layer=layer)
return json
def scaleDependentLayerScript(layer, layerName):
min = layer.minimumScale()
max = layer.maximumScale()
scaleDependentLayer = """
if (map.getZoom() <= {min} && map.getZoom() >= {max}) {{
feature_group.addLayer(json_{layerName}JSON);
console.log("show");
//restackLayers();
}} else if (map.getZoom() > {min} || map.getZoom() < {max}) {{
feature_group.removeLayer(json_{layerName}JSON);
console.log("hide");
//restackLayers();
}}""".format(min=scaleToZoom(min), max=scaleToZoom(max), layerName=layerName)
return scaleDependentLayer
def scaleDependentScript(layers):
scaleDependent = """
map.on("zoomend", function(e) {"""
scaleDependent += layers
scaleDependent += """
});"""
scaleDependent += layers
return scaleDependent
def openScript():
openScript = """
<script>"""
return openScript
def crsScript(crsAuthId, crsProj4):
crs = """
var crs = new L.Proj.CRS('{crsAuthId}', '{crsProj4}', {{
resolutions: [2800, 1400, 700, 350, 175, 84, 42, 21, 11.2, 5.6, 2.8, 1.4, 0.7, 0.35, 0.14, 0.07],
}});""".format(crsAuthId=crsAuthId, crsProj4=crsProj4)
return crs
def mapScript(extent, matchCRS, crsAuthId, measure, maxZoom, minZoom, bounds):
map = """
var map = L.map('map', {"""
if extent == "Canvas extent" and matchCRS and crsAuthId != 'EPSG:4326':
map += """
crs: crs,
continuousWorld: false,
worldCopyJump: false, """
if measure:
map += """
measureControl:true,"""
map += """
zoomControl:true, maxZoom:""" + unicode(maxZoom) + """, minZoom:""" + unicode(minZoom) + """
})"""
if extent == "Canvas extent":
map += """.fitBounds(""" + bounds + """);"""
map += """
var hash = new L.Hash(map);
var additional_attrib = '<a href="https://github.com/tomchadwin/qgis2web" target ="_blank">qgis2web</a>';"""
return map
def featureGroupsScript():
featureGroups = """
var feature_group = new L.featureGroup([]);
var raster_group = new L.LayerGroup([]);"""
return featureGroups
def basemapsScript(basemap, attribution):
basemaps = """
var basemap = L.tileLayer('{basemap}', {{
attribution: additional_attrib + ' {attribution}'
}});
basemap.addTo(map);""".format(basemap=basemap, attribution=attribution)
return basemaps
def layerOrderScript():
layerOrder = """
var layerOrder=new Array();
function restackLayers() {
for (index = 0; index < layerOrder.length; index++) {
feature_group.removeLayer(layerOrder[index]);
feature_group.addLayer(layerOrder[index]);
}
}
layerControl = L.control.layers({},{},{collapsed:false});"""
return layerOrder
def popFuncsScript(table):
popFuncs = """
var popupContent = {table};
layer.bindPopup(popupContent);""".format(table=table)
return popFuncs
def popupScript(safeLayerName, popFuncs):
popup = """
function pop_{safeLayerName}(feature, layer) {{{popFuncs}
}}""".format(safeLayerName=safeLayerName, popFuncs=popFuncs)
return popup
def pointToLayerScript(radius, borderWidth, borderStyle, colorName, borderColor, borderOpacity, opacity, labeltext):
pointToLayer = """
pointToLayer: function (feature, latlng) {{
return L.circleMarker(latlng, {{
radius: {radius},
fillColor: '{colorName}',
color: '{borderColor}',
weight: {borderWidth},
opacity: {borderOpacity},
dashArray: '{dashArray}',
fillOpacity: {opacity}
}}){labeltext}""".format(radius=radius,
colorName=colorName,
borderColor=borderColor,
borderWidth=borderWidth * 4,
borderOpacity=borderOpacity if borderStyle != 0 else 0,
dashArray=getLineStyle(borderStyle, borderWidth),
opacity=opacity,
labeltext=labeltext)
return pointToLayer
def pointStyleScript(pointToLayer, popFuncs):
pointStyle = """{pointToLayer}
}},
onEachFeature: function (feature, layer) {{{popFuncs}
}}""".format(pointToLayer=pointToLayer, popFuncs=popFuncs)
return pointStyle
def wfsScript(scriptTag):
wfs = """
<script src='{scriptTag}'></script>""".format(scriptTag=scriptTag)
return wfs
def jsonPointScript(safeLayerName, pointToLayer, usedFields):
if usedFields != 0:
jsonPoint = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
onEachFeature: pop_{safeLayerName}, {pointToLayer}
}}
}});
layerOrder[layerOrder.length] = json_{safeLayerName}JSON;""".format(safeLayerName=safeLayerName, pointToLayer=pointToLayer)
else:
jsonPoint = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
{pointToLayer}
}}
}});
layerOrder[layerOrder.length] = json_{safeLayerName}JSON;""".format(safeLayerName=safeLayerName, pointToLayer=pointToLayer)
return jsonPoint
def clusterScript(safeLayerName):
cluster = """
var cluster_group{safeLayerName}JSON = new L.MarkerClusterGroup({{showCoverageOnHover: false}});
cluster_group{safeLayerName}JSON.addLayer(json_{safeLayerName}JSON);""".format(safeLayerName=safeLayerName)
return cluster
def categorizedPointStylesScript(symbol, opacity, borderOpacity):
styleValues = """
radius: '{radius}',
fillColor: '{fillColor}',
color: '{color}',
weight: {borderWidth},
opacity: {borderOpacity},
dashArray: '{dashArray}',
fillOpacity: '{opacity}',
}};
break;""".format(radius=symbol.size() * 2,
fillColor=symbol.color().name(),
color=symbol.symbolLayer(0).borderColor().name(),
borderWidth=symbol.symbolLayer(0).outlineWidth() * 4,
borderOpacity=borderOpacity if symbol.symbolLayer(0).outlineStyle() != 0 else 0,
dashArray=getLineStyle(symbol.symbolLayer(0).outlineStyle(), symbol.symbolLayer(0).outlineWidth()),
opacity=opacity)
return styleValues
def simpleLineStyleScript(radius, colorName, penStyle, opacity):
lineStyle = """
return {{
weight: {radius},
color: '{colorName}',
dashArray: '{penStyle}',
opacity: {opacity}
}};""".format(radius=radius * 4,
colorName=colorName,
penStyle=penStyle,
opacity=opacity)
return lineStyle
def singlePolyStyleScript(radius, colorName, borderOpacity, fillColor, penStyle, opacity):
polyStyle = """
return {{
weight: {radius},
color: '{colorName}',
fillColor: '{fillColor}',
dashArray: '{penStyle}',
opacity: {borderOpacity},
fillOpacity: {opacity}
}};""".format(radius=radius,
colorName=colorName,
fillColor=fillColor,
penStyle=penStyle,
borderOpacity=borderOpacity,
opacity=opacity)
return polyStyle
def nonPointStylePopupsScript(lineStyle, popFuncs):
nonPointStylePopups = """
style: function (feature) {{{lineStyle}
}},
onEachFeature: function (feature, layer) {{{popFuncs}
}}""".format(lineStyle=lineStyle, popFuncs=popFuncs)
return nonPointStylePopups
def nonPointStyleFunctionScript(safeLayerName, lineStyle):
nonPointStyleFunction = """
function doStyle{safeLayerName}(feature) {{{lineStyle}
}}""".format(safeLayerName=safeLayerName, lineStyle=lineStyle)
return nonPointStyleFunction
def categoryScript(layerName, valueAttr):
category = """
function doStyle{layerName}(feature) {{
switch (feature.properties.{valueAttr}) {{""".format(layerName=layerName, valueAttr=valueAttr)
return category
def defaultCategoryScript():
defaultCategory = """
default:
return {"""
return defaultCategory
def eachCategoryScript(catValue):
if isinstance(catValue, basestring):
valQuote = "'"
else:
valQuote = ""
eachCategory = """
case """ + valQuote + unicode(catValue) + valQuote + """:
return {"""
return eachCategory
def endCategoryScript():
endCategory = """
}
}"""
return endCategory
def categorizedPointWFSscript(layerName, labeltext, popFuncs):
categorizedPointWFS = """
pointToLayer: function (feature, latlng) {{
return L.circleMarker(latlng, doStyle{layerName}(feature)){labeltext}
}},
onEachFeature: function (feature, layer) {{{popFuncs}
}}""".format(layerName=layerName, labeltext=labeltext, popFuncs=popFuncs)
return categorizedPointWFS
def categorizedPointJSONscript(safeLayerName, labeltext, usedFields):
if usedFields != 0:
categorizedPointJSON = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
onEachFeature: pop_{safeLayerName},
pointToLayer: function (feature, latlng) {{
return L.circleMarker(latlng, doStyle{safeLayerName}(feature)){labeltext}
}}
}});
layerOrder[layerOrder.length] = json_{safeLayerName}JSON;""".format(safeLayerName=safeLayerName, labeltext=labeltext)
else:
categorizedPointJSON = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
pointToLayer: function (feature, latlng) {{
return L.circleMarker(latlng, doStyle{safeLayerName}(feature)){labeltext}
}}
}});
layerOrder[layerOrder.length] = json_{safeLayerName}JSON;""".format(safeLayerName=safeLayerName, labeltext=labeltext)
return categorizedPointJSON
def categorizedLineStylesScript(symbol, opacity):
categorizedLineStyles = """
color: '{color}',
weight: '{weight}',
dashArray: '{dashArray}',
opacity: '{opacity}',
}};
break;""".format(color=symbol.color().name(),
weight=symbol.width() * 4,
dashArray=getLineStyle(symbol.symbolLayer(0).penStyle(), symbol.width()),
opacity=opacity)
return categorizedLineStyles
def categorizedNonPointStyleFunctionScript(layerName, popFuncs):
categorizedNonPointStyleFunction = """
style: doStyle{layerName},
onEachFeature: function (feature, layer) {{{popFuncs}
}}""".format(layerName=layerName, popFuncs=popFuncs)
return categorizedNonPointStyleFunction
def categorizedPolygonStylesScript(symbol, opacity, borderOpacity):
categorizedPolygonStyles = """
weight: '{weight}',
fillColor: '{fillColor}',
color: '{color}',
dashArray: '{dashArray}',
opacity: '{borderOpacity}',
fillOpacity: '{opacity}',
}};
break;""".format(weight=symbol.symbolLayer(0).borderWidth() * 4,
fillColor=symbol.color().name() if symbol.symbolLayer(0).brushStyle() != 0 else "none",
color=symbol.symbolLayer(0).borderColor().name() if symbol.symbolLayer(0).borderStyle() != 0 else "none",
dashArray=getLineStyle(symbol.symbolLayer(0).borderStyle(), symbol.symbolLayer(0).borderWidth()),
borderOpacity=borderOpacity,
opacity=opacity)
return categorizedPolygonStyles
def graduatedStyleScript(layerName):
graduatedStyle = """
function doStyle{layerName}(feature) {{""".format(layerName=layerName)
return graduatedStyle
def rangeStartScript(valueAttr, r):
rangeStart = """
if (feature.properties.{valueAttr} >= {lowerValue} && feature.properties.{valueAttr} <= {upperValue}) {{""".format(valueAttr=valueAttr, lowerValue=r.lowerValue(), upperValue=r.upperValue())
return rangeStart
def graduatedPointStylesScript(valueAttr, r, symbol, opacity, borderOpacity):
graduatedPointStyles = rangeStartScript(valueAttr, r)
graduatedPointStyles += """
return {{
radius: '{radius}',
fillColor: '{fillColor}',
color: '{color}',
weight: {lineWeight},
fillOpacity: '{opacity}',
opacity: '{borderOpacity}',
dashArray: '{dashArray}'
}}
}}""".format(radius=symbol.size() * 2,
fillColor=symbol.color().name(),
color=symbol.symbolLayer(0).borderColor().name(),
lineWeight=symbol.symbolLayer(0).outlineWidth() * 4,
opacity=opacity,
borderOpacity=borderOpacity,
dashArray=getLineStyle(symbol.symbolLayer(0).outlineStyle(), symbol.symbolLayer(0).outlineWidth()))
return graduatedPointStyles
def graduatedLineStylesScript(valueAttr, r, categoryStr, symbol, opacity):
graduatedLineStyles = rangeStartScript(valueAttr, r)
graduatedLineStyles += """
return {{
color: '{color}',
weight: '{weight}',
dashArray: '{dashArray}',
opacity: '{opacity}',
}}
}}""".format(color=symbol.symbolLayer(0).color().name(),
weight=symbol.width() * 4,
dashArray=getLineStyle(symbol.symbolLayer(0).penStyle(), symbol.width()),
opacity=opacity)
return graduatedLineStyles
def graduatedPolygonStylesScript(valueAttr, r, symbol, opacity, borderOpacity):
graduatedPolygonStyles = rangeStartScript(valueAttr, r)
graduatedPolygonStyles += """
return {{
color: '{color}',
weight: '{weight}',
dashArray: '{dashArray}',
fillColor: '{fillColor}',
opacity: '{borderOpacity}',
fillOpacity: '{opacity}',
}}
}}""".format(color=symbol.symbolLayer(0).borderColor().name(),
weight=symbol.symbolLayer(0).borderWidth() * 4 if symbol.symbolLayer(0).borderStyle() != 0 else "0",
dashArray=getLineStyle(symbol.symbolLayer(0).borderStyle(), symbol.symbolLayer(0).borderWidth() if symbol.symbolLayer(0).borderStyle() != 0 else "0"),
fillColor=symbol.color().name() if symbol.symbolLayer(0).brushStyle() != 0 else "none",
borderOpacity=borderOpacity,
opacity=opacity)
return graduatedPolygonStyles
def endGraduatedStyleScript():
endGraduatedStyle = """
}"""
return endGraduatedStyle
def customMarkerScript(safeLayerName, labeltext, usedFields):
if usedFields != 0:
customMarker = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
onEachFeature: pop_{safeLayerName},
pointToLayer: function (feature, latlng) {{
return L.marker(latlng, {{
icon: L.icon({{
iconUrl: feature.properties.icon_exp,
iconSize: [24, 24], // size of the icon change this to scale your icon (first coordinate is x, second y from the upper left corner of the icon)
iconAnchor: [12, 12], // point of the icon which will correspond to marker's location (first coordinate is x, second y from the upper left corner of the icon)
popupAnchor: [0, -14] // point from which the popup should open relative to the iconAnchor (first coordinate is x, second y from the upper left corner of the icon)
}})
}}){labeltext}
}}}}
);""".format(safeLayerName=safeLayerName, labeltext=labeltext)
else:
customMarker = """
var json_{safeLayerName}JSON = new L.geoJson(json_{safeLayerName}, {{
pointToLayer: function (feature, latlng) {{
return L.marker(latlng, {{
icon: L.icon({{
iconUrl: feature.properties.icon_exp,
iconSize: [24, 24], // size of the icon change this to scale your icon (first coordinate is x, second y from the upper left corner of the icon)
iconAnchor: [12, 12], // point of the icon which will correspond to marker's location (first coordinate is x, second y from the upper left corner of the icon)
popupAnchor: [0, -14] // point from which the popup should open relative to the iconAnchor (first coordinate is x, second y from the upper left corner of the icon)
}})
}}){labeltext}
}}}}
);""".format(safeLayerName=safeLayerName, labeltext=labeltext)
return customMarker
def wmsScript(safeLayerName, wms_url, wms_layer, wms_format):
wms = """
var overlay_{safeLayerName} = L.tileLayer.wms('{wms_url}', {{
layers: '{wms_layer}',
format: '{wms_format}',
transparent: true,
continuousWorld : true,
}});""".format(safeLayerName=safeLayerName,
wms_url=wms_url,
wms_layer=wms_layer,
wms_format=wms_format)
return wms
def rasterScript(safeLayerName, out_raster_name, bounds):
raster = """
var img_{safeLayerName} = '{out_raster_name}';
var img_bounds_{safeLayerName} = {bounds};
var overlay_{safeLayerName} = new L.imageOverlay(img_{safeLayerName}, img_bounds_{safeLayerName});""".format(safeLayerName=safeLayerName, out_raster_name=out_raster_name, bounds=bounds)
return raster
def titleSubScript(webmap_head, webmap_subhead):
titleSub = """
var title = new L.Control();
title.onAdd = function (map) {
this._div = L.DomUtil.create('div', 'info'); // create a div with a class "info"
this.update();
return this._div;
};
title.update = function () {
this._div.innerHTML = '<h2>""" + webmap_head.encode('utf-8') + """</h2>""" + webmap_subhead.encode('utf-8') + """'
};
title.addTo(map);"""
return titleSub
def addressSearchScript():
addressSearch = """
var osmGeocoder = new L.Control.OSMGeocoder({
collapsed: false,
position: 'topright',
text: 'Search',
});
osmGeocoder.addTo(map);"""
return addressSearch
def locateScript():
locate = """
map.locate({setView: true, maxZoom: 16});
function onLocationFound(e) {
var radius = e.accuracy / 2;
L.marker(e.latlng).addTo(map)
.bindPopup("You are within " + radius + " meters from this point").openPopup();
L.circle(e.latlng, radius).addTo(map);
}
map.on('locationfound', onLocationFound);
"""
return locate
def endHTMLscript(wfsLayers):
endHTML = """
</script>{wfsLayers}
</body>
</html>""".format(wfsLayers=wfsLayers)
return endHTML
def getLineStyle(penType, lineWidth):
dash = lineWidth * 10
dot = lineWidth * 1
gap = lineWidth * 5
if penType > 1:
if penType == 2:
penStyle = [dash, gap]
if penType == 3:
penStyle = [dot, gap]
if penType == 4:
penStyle = [dash, gap, dot, gap]
if penType == 5:
penStyle = [dash, gap, dot, gap, dot, gap]
penStyle = ','.join(map(str, penStyle))
else:
penStyle = ""
return penStyle
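# Illustrative output (made-up inputs): getLineStyle(2, 1.5) returns the
# Leaflet dashArray string "15.0,7.5" (dash = width * 10, gap = width * 5),
# while any penType <= 1 returns "" (a solid line).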
| radumas/qgis2web | leafletScriptStrings.py | Python | gpl-2.0 | 20,814 | 0.002114 |
"""
WSGI config for superlists project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
application = get_wsgi_application()
| xueyaodeai/DjangoWebsite | superlists/wsgi.py | Python | mit | 398 | 0 |
"""The gearbest component."""
| jnewland/home-assistant | homeassistant/components/gearbest/__init__.py | Python | apache-2.0 | 30 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search', '0032_auto_20160819_1209'),
]
operations = [
migrations.AlterField(
model_name='docket',
name='nature_of_suit',
field=models.CharField(help_text=b'The nature of suit code from PACER.', max_length=1000, blank=True),
),
]
| voutilad/courtlistener | cl/search/migrations/0033_auto_20160819_1214.py | Python | agpl-3.0 | 473 | 0.002114 |
## @file
# This file is used to create a database used by ECC tool
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import sqlite3
import Common.LongFilePathOs as os, time
import Common.EdkLogger as EdkLogger
import CommonDataClass.DataClass as DataClass
from Table.TableDataModel import TableDataModel
from Table.TableFile import TableFile
from Table.TableFunction import TableFunction
from Table.TablePcd import TablePcd
from Table.TableIdentifier import TableIdentifier
from Table.TableReport import TableReport
from MetaFileWorkspace.MetaFileTable import ModuleTable
from MetaFileWorkspace.MetaFileTable import PackageTable
from MetaFileWorkspace.MetaFileTable import PlatformTable
from Table.TableFdf import TableFdf
##
# Static definitions
#
DATABASE_PATH = "Ecc.db"
## Database
#
# This class defines the ECC database
# During the phase of initialization, the database will create all tables and
# insert all records of table DataModel
#
# @param object: Inherited from object class
# @param DbPath: A string for the path of the ECC database
#
# @var Conn: Connection of the ECC database
# @var Cur: Cursor of the connection
# @var TblDataModel: Local instance for TableDataModel
#
class Database(object):
def __init__(self, DbPath):
self.DbPath = DbPath
self.Conn = None
self.Cur = None
self.TblDataModel = None
self.TblFile = None
self.TblFunction = None
self.TblIdentifier = None
self.TblPcd = None
self.TblReport = None
self.TblInf = None
self.TblDec = None
self.TblDsc = None
self.TblFdf = None
## Initialize ECC database
#
# 1. Delete all old existing tables
# 2. Create new tables
# 3. Initialize table DataModel
#
def InitDatabase(self, NewDatabase = True):
EdkLogger.verbose("\nInitialize ECC database started ...")
#
# Drop all old existing tables
#
if NewDatabase:
if os.path.exists(self.DbPath):
os.remove(self.DbPath)
self.Conn = sqlite3.connect(self.DbPath, isolation_level = 'DEFERRED')
self.Conn.execute("PRAGMA page_size=4096")
self.Conn.execute("PRAGMA synchronous=OFF")
        # to avoid non-ASCII character conversion errors
self.Conn.text_factory = str
self.Cur = self.Conn.cursor()
self.TblDataModel = TableDataModel(self.Cur)
self.TblFile = TableFile(self.Cur)
self.TblFunction = TableFunction(self.Cur)
self.TblIdentifier = TableIdentifier(self.Cur)
self.TblPcd = TablePcd(self.Cur)
self.TblReport = TableReport(self.Cur)
self.TblInf = ModuleTable(self.Cur)
self.TblDec = PackageTable(self.Cur)
self.TblDsc = PlatformTable(self.Cur)
self.TblFdf = TableFdf(self.Cur)
#
# Create new tables
#
if NewDatabase:
self.TblDataModel.Create()
self.TblFile.Create()
self.TblFunction.Create()
self.TblPcd.Create()
self.TblReport.Create()
self.TblInf.Create()
self.TblDec.Create()
self.TblDsc.Create()
self.TblFdf.Create()
#
# Init each table's ID
#
self.TblDataModel.InitID()
self.TblFile.InitID()
self.TblFunction.InitID()
self.TblPcd.InitID()
self.TblReport.InitID()
self.TblInf.InitID()
self.TblDec.InitID()
self.TblDsc.InitID()
self.TblFdf.InitID()
#
# Initialize table DataModel
#
if NewDatabase:
self.TblDataModel.InitTable()
EdkLogger.verbose("Initialize ECC database ... DONE!")
## Query a table
#
# @param Table: The instance of the table to be queried
#
def QueryTable(self, Table):
Table.Query()
## Close entire database
#
# Commit all first
# Close the connection and cursor
#
def Close(self):
#
# Commit to file
#
self.Conn.commit()
#
# Close connection and cursor
#
self.Cur.close()
self.Conn.close()
## Insert one file information
#
# Insert one file's information to the database
# 1. Create a record in TableFile
# 2. Create functions one by one
# 2.1 Create variables of function one by one
# 2.2 Create pcds of function one by one
# 3. Create variables one by one
# 4. Create pcds one by one
#
def InsertOneFile(self, File):
#
# Insert a record for file
#
FileID = self.TblFile.Insert(File.Name, File.ExtName, File.Path, File.FullPath, Model = File.Model, TimeStamp = File.TimeStamp)
if File.Model == DataClass.MODEL_FILE_C or File.Model == DataClass.MODEL_FILE_H:
IdTable = TableIdentifier(self.Cur)
IdTable.Table = "Identifier%s" % FileID
IdTable.Create()
#
# Insert function of file
#
for Function in File.FunctionList:
FunctionID = self.TblFunction.Insert(Function.Header, Function.Modifier, Function.Name, Function.ReturnStatement, \
Function.StartLine, Function.StartColumn, Function.EndLine, Function.EndColumn, \
Function.BodyStartLine, Function.BodyStartColumn, FileID, \
Function.FunNameStartLine, Function.FunNameStartColumn)
#
# Insert Identifier of function
#
for Identifier in Function.IdentifierList:
IdentifierID = IdTable.Insert(Identifier.Modifier, Identifier.Type, Identifier.Name, Identifier.Value, Identifier.Model, \
FileID, FunctionID, Identifier.StartLine, Identifier.StartColumn, Identifier.EndLine, Identifier.EndColumn)
#
# Insert Pcd of function
#
for Pcd in Function.PcdList:
PcdID = self.TblPcd.Insert(Pcd.CName, Pcd.TokenSpaceGuidCName, Pcd.Token, Pcd.DatumType, Pcd.Model, \
FileID, FunctionID, Pcd.StartLine, Pcd.StartColumn, Pcd.EndLine, Pcd.EndColumn)
#
# Insert Identifier of file
#
for Identifier in File.IdentifierList:
IdentifierID = IdTable.Insert(Identifier.Modifier, Identifier.Type, Identifier.Name, Identifier.Value, Identifier.Model, \
FileID, -1, Identifier.StartLine, Identifier.StartColumn, Identifier.EndLine, Identifier.EndColumn)
#
# Insert Pcd of file
#
for Pcd in File.PcdList:
PcdID = self.TblPcd.Insert(Pcd.CName, Pcd.TokenSpaceGuidCName, Pcd.Token, Pcd.DatumType, Pcd.Model, \
FileID, -1, Pcd.StartLine, Pcd.StartColumn, Pcd.EndLine, Pcd.EndColumn)
EdkLogger.verbose("Insert information from file %s ... DONE!" % File.FullPath)
## UpdateIdentifierBelongsToFunction
#
    # Update the field "BelongsToFunction" for each Identifier
#
#
def UpdateIdentifierBelongsToFunction_disabled(self):
EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers started ...")
SqlCommand = """select ID, BelongsToFile, StartLine, EndLine, Model from Identifier"""
EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
self.Cur.execute(SqlCommand)
Records = self.Cur.fetchall()
for Record in Records:
IdentifierID = Record[0]
BelongsToFile = Record[1]
StartLine = Record[2]
EndLine = Record[3]
Model = Record[4]
#
# Check whether an identifier belongs to a function
#
EdkLogger.debug(4, "For common identifiers ... ")
SqlCommand = """select ID from Function
where StartLine < %s and EndLine > %s
and BelongsToFile = %s""" % (StartLine, EndLine, BelongsToFile)
EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
self.Cur.execute(SqlCommand)
IDs = self.Cur.fetchall()
for ID in IDs:
SqlCommand = """Update Identifier set BelongsToFunction = %s where ID = %s""" % (ID[0], IdentifierID)
EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
self.Cur.execute(SqlCommand)
#
# Check whether the identifier is a function header
#
EdkLogger.debug(4, "For function headers ... ")
if Model == DataClass.MODEL_IDENTIFIER_COMMENT:
SqlCommand = """select ID from Function
where StartLine = %s + 1
and BelongsToFile = %s""" % (EndLine, BelongsToFile)
EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
self.Cur.execute(SqlCommand)
IDs = self.Cur.fetchall()
for ID in IDs:
SqlCommand = """Update Identifier set BelongsToFunction = %s, Model = %s where ID = %s""" % (ID[0], DataClass.MODEL_IDENTIFIER_FUNCTION_HEADER, IdentifierID)
EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
self.Cur.execute(SqlCommand)
EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers ... DONE")
## UpdateIdentifierBelongsToFunction
#
    # Update the field "BelongsToFunction" for each Identifier
#
#
def UpdateIdentifierBelongsToFunction(self):
EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers started ...")
SqlCommand = """select ID, BelongsToFile, StartLine, EndLine from Function"""
Records = self.TblFunction.Exec(SqlCommand)
Data1 = []
Data2 = []
for Record in Records:
FunctionID = Record[0]
BelongsToFile = Record[1]
StartLine = Record[2]
EndLine = Record[3]
#Data1.append(("'file%s'" % BelongsToFile, FunctionID, BelongsToFile, StartLine, EndLine))
#Data2.append(("'file%s'" % BelongsToFile, FunctionID, DataClass.MODEL_IDENTIFIER_FUNCTION_HEADER, BelongsToFile, DataClass.MODEL_IDENTIFIER_COMMENT, StartLine - 1))
SqlCommand = """Update Identifier%s set BelongsToFunction = %s where BelongsToFile = %s and StartLine > %s and EndLine < %s""" % \
(BelongsToFile, FunctionID, BelongsToFile, StartLine, EndLine)
self.TblIdentifier.Exec(SqlCommand)
SqlCommand = """Update Identifier%s set BelongsToFunction = %s, Model = %s where BelongsToFile = %s and Model = %s and EndLine = %s""" % \
(BelongsToFile, FunctionID, DataClass.MODEL_IDENTIFIER_FUNCTION_HEADER, BelongsToFile, DataClass.MODEL_IDENTIFIER_COMMENT, StartLine - 1)
self.TblIdentifier.Exec(SqlCommand)
# #
# # Check whether an identifier belongs to a function
# #
# print Data1
# SqlCommand = """Update ? set BelongsToFunction = ? where BelongsToFile = ? and StartLine > ? and EndLine < ?"""
# print SqlCommand
# EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
# self.Cur.executemany(SqlCommand, Data1)
#
# #
# # Check whether the identifier is a function header
# #
# EdkLogger.debug(4, "For function headers ... ")
# SqlCommand = """Update ? set BelongsToFunction = ?, Model = ? where BelongsToFile = ? and Model = ? and EndLine = ?"""
# EdkLogger.debug(4, "SqlCommand: %s" %SqlCommand)
# self.Cur.executemany(SqlCommand, Data2)
#
# EdkLogger.verbose("Update 'BelongsToFunction' for Identifiers ... DONE")
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
EdkLogger.Initialize()
#EdkLogger.SetLevel(EdkLogger.VERBOSE)
EdkLogger.SetLevel(EdkLogger.DEBUG_0)
EdkLogger.verbose("Start at " + time.strftime('%H:%M:%S', time.localtime()))
Db = Database(DATABASE_PATH)
Db.InitDatabase()
Db.QueryTable(Db.TblDataModel)
identifier1 = DataClass.IdentifierClass(-1, '', '', "i''1", 'aaa', DataClass.MODEL_IDENTIFIER_COMMENT, 1, -1, 32, 43, 54, 43)
identifier2 = DataClass.IdentifierClass(-1, '', '', 'i1', 'aaa', DataClass.MODEL_IDENTIFIER_COMMENT, 1, -1, 15, 43, 20, 43)
identifier3 = DataClass.IdentifierClass(-1, '', '', 'i1', 'aaa', DataClass.MODEL_IDENTIFIER_COMMENT, 1, -1, 55, 43, 58, 43)
identifier4 = DataClass.IdentifierClass(-1, '', '', "i1'", 'aaa', DataClass.MODEL_IDENTIFIER_COMMENT, 1, -1, 77, 43, 88, 43)
fun1 = DataClass.FunctionClass(-1, '', '', 'fun1', '', 21, 2, 60, 45, 1, 23, 0, [], [])
file = DataClass.FileClass(-1, 'F1', 'c', 'C:\\', 'C:\\F1.exe', DataClass.MODEL_FILE_C, '2007-12-28', [fun1], [identifier1, identifier2, identifier3, identifier4], [])
Db.InsertOneFile(file)
Db.UpdateIdentifierBelongsToFunction()
Db.QueryTable(Db.TblFile)
Db.QueryTable(Db.TblFunction)
Db.QueryTable(Db.TblPcd)
Db.QueryTable(Db.TblIdentifier)
Db.Close()
EdkLogger.verbose("End at " + time.strftime('%H:%M:%S', time.localtime()))
| intel/ipmctl | BaseTools/Source/Python/Ecc/Database.py | Python | bsd-3-clause | 14,225 | 0.005413 |
#! /usr/bin/env python3
import sys
import csv
import menu_map
import datetime
db_input = sys.argv[1]
def normalize_elem(min_, max_, value):
    '''Linearly map value from the interval [min_, max_] onto [-0.5, 0.5].'''
return (value - min_)/(max_ - min_) - 0.5
def normalize(array, min_, max_):
    '''Normalize every element of array in place using normalize_elem and return it.'''
for i in range(0, len(array)):
array[i] = normalize_elem(min_, max_, array[i])
return array
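# Worked example: normalize_elem() maps a value linearly from [min_, max_]
# onto [-0.5, 0.5].  For the weekday range 0..6 used below:
#   normalize_elem(0, 6, 0) == -0.5   (Monday, tm_wday == 0)
#   normalize_elem(0, 6, 3) ==  0.0   (mid-week)
#   normalize_elem(0, 6, 6) ==  0.5   (Sunday, tm_wday == 6)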
with open(db_input, newline='\n') as db_in:
day_of_the_week = []
month = []
menu = []
temp_avg = []
rain_acc = []
nutri_week = []
vacation = []
strike = []
total_enrolled = []
target = []
traindb = csv.reader(db_in, delimiter='\t', quotechar='"')
for row in traindb:
date = row[0].split('-')
date_info = datetime.date(int(date[0]), int(date[1]), int(date[2])).timetuple()
day_of_the_week.append(float(date_info.tm_wday))
month.append(float(date_info.tm_mon))
menu.append(menu_map.map[row[1]])
temp_avg.append(float(row[4]))
rain_acc.append(float(row[5]))
nutri_week.append(float(row[6]))
vacation.append(float(row[7]))
strike.append(float(row[8]))
total_enrolled.append(float(row[9]))
target.append(float(row[3]))
    # normalize values into the [-0.5, 0.5] range
day_of_the_week = normalize(day_of_the_week, 0, 6)
month = normalize(month, 1, 12)
menu = normalize(menu, 0, 27)
temp_avg = normalize(temp_avg, 5, 40)
rain_acc = normalize(rain_acc, 0, 100)
nutri_week = normalize(nutri_week, 0, 1)
vacation = normalize(vacation, 0, 1)
strike = normalize(strike, 0, 1)
total_enrolled = normalize(total_enrolled, 30000, 50000)
target = normalize(target, 1000, 13000)
    # write the normalized data in FANN training-file format
print(str(len(target)) + ' 9 1')
for i in range(0, len(target)):
print(str(day_of_the_week[i]) + ' ' + str(month[i]) + ' ' + str(menu[i]) + ' ' + str(temp_avg[i]) + ' ' + str(rain_acc[i]) + ' ' + str(nutri_week[i]) + ' ' + str(vacation[i]) + ' ' + str(strike[i]) + ' ' + str(total_enrolled[i]))
print(str(target[i]))
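# Note (assumption): the lines printed above follow the plain-text training-data
# layout used by FANN-style tools: a header line of the form
# "<num_samples> <num_inputs> <num_outputs>" (here "<N> 9 1"), followed by
# alternating lines of the 9 normalized input values and the single normalized
# target value, for example:
#
#   1043 9 1
#   -0.5 0.318 -0.25 0.071 -0.42 0.5 -0.5 -0.5 0.12
#   -0.173
#
# The numbers shown are illustrative, not taken from the real data set.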
| guilhermerc/fournir-au-public | tools/fann_input_norm.py | Python | gpl-3.0 | 2,101 | 0.005236 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Defines a correlation-matrix class, plus (eventually) a class to find similar users who have rated a particular movie with respect to a given user, based on correlation.
import numpy as np
import pandas as pd
class Correlation:
"""
"""
def pearson(self, rating_matrix):
return pd.DataFrame(rating_matrix.T).corr().as_matrix()
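# Illustrative sketch only: pearson() transposes the rating matrix before
# pandas' corr(), so with a users-by-items matrix the result is a user-to-user
# Pearson correlation matrix.  The ratings below are made up.
if __name__ == "__main__":
    ratings = np.array([[5.0, 3.0, 1.0],
                        [4.0, 2.0, 1.0],
                        [1.0, 5.0, 5.0]])  # rows: users, columns: movies
    print(Correlation().pearson(ratings))  # 3x3 user-to-user correlations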
| sagnik17/Movie-Recommendation-System | mrs/recsys/cf.py | Python | gpl-3.0 | 387 | 0.005168 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 liangzy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from lxml.etree import HTML
from sillypool.settings import QQWRY_PATH
from sillypool.database.models import Proxy
from sillypool.libs.exception import ParserTypeNotSupportError
from sillypool.libs.iplocator import IPLocator
class Parser:
def __init__(self):
self.ip_locator = IPLocator(QQWRY_PATH)
def parse(self, response, url_config, url):
if url_config['type'] == 'xpath':
return self.parse_xpath(response, url_config, url)
else:
raise ParserTypeNotSupportError(url_config['type'])
def parse_xpath(self, response, url_config, url):
proxy_list = []
root = HTML(response)
proxy_all = root.xpath(url_config['pattern'])
for proxy in proxy_all:
try:
ip = proxy.xpath(url_config['position']['ip'])[0].text
country, address = self.ip_locator.get_ip_address(self.ip_locator.str2ip(ip))
proxy = Proxy(
ip=proxy.xpath(url_config['position']['ip'])[0].text,
port=proxy.xpath(url_config['position']['port'])[0].text,
country=self.judge_country(country),
area=address,
crawl_time=datetime.datetime.utcnow()
)
proxy_list.append(proxy)
except OSError as e:
logging.error("parser error: " + url)
break
except Exception as e:
logging.error(e)
                logging.error('proxy: %s' % proxy)
return proxy_list
@staticmethod
def judge_country(country):
china_area = ['河北', '山东', '辽宁', '黑龙江', '吉林',
'甘肃', '青海', '河南', '江苏', '湖北',
'湖南', '江西', '浙江', '广东', '云南',
'福建', '台湾', '海南', '山西', '四川',
'陕西', '贵州', '安徽', '重庆', '北京',
'上海', '天津', '广西', '内蒙', '西藏',
'新疆', '宁夏', '香港', '澳门']
for area in china_area:
if area in country:
return "中国"
return country
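# Illustrative sketch (assumption): parse_xpath() expects a url_config dict
# shaped roughly like the one below; the actual keys and XPath expressions come
# from this project's spider configuration, which is not shown here, so treat
# this purely as an illustration:
#
#   {
#       'type': 'xpath',
#       'pattern': '//table[@id="ip_list"]//tr[position() > 1]',
#       'position': {'ip': 'td[2]', 'port': 'td[3]'},
#   }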
| vliangzy/sillypool | sillypool/spider/parser/xpath.py | Python | apache-2.0 | 2,885 | 0.000365 |
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from . import sale_order
| Trust-Code/trust-addons | trust_sale/models/__init__.py | Python | agpl-3.0 | 1,413 | 0 |
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
from kivy.uix.widget import Widget
class HorizontalSpacer(Widget):
def __init__(self, **kwargs):
super(HorizontalSpacer, self).__init__( **kwargs)
self.size_hint_y = None
self.height=0
class VerticalSpacer(Widget):
def __init__(self, **kwargs):
super(VerticalSpacer, self).__init__( **kwargs)
self.size_hint_x = None
self.width=0
| autosportlabs/RaceCapture_App | spacer.py | Python | gpl-3.0 | 1,123 | 0.005343 |
import sys
import os
import shutil as sh
import logging as log
import multiprocessing as mp
import ex.util as eu
from OMOP import OMOP
import base
eu.InitLog(log.INFO)
dat_dest="D:/Documents/DataSet/omop/simulation/"
# dat_dest="~/h/data/omop/simulation/"
def DoTask(configs):
modifier=configs[0]
folder=base.Simulate(
modifier, validation=True, n_drug=10, n_cond=10, n_person=500,
cond_alt=configs[1], ob_alt=configs[2], drug_alt=configs[3],
dexposure_alt=configs[4],doutcome_alt=configs[5],ind_alt=configs[6],
no_simu=True)
ds=OMOP(modifier, folder)
ds.CreateDB()
ds.OrderDB()
ds.IndexDB()
ds.JoinDrugCond(simu=True)
ds.ExpandCondOccur(simu=True)
ds.GenCountTable()
return(folder)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1].startswith('s'):
log.info('''OMOP Single threaded simulation.''')
parallel=False
else:
log.info('''OMOP Parallel simulation.''')
parallel=True
log.warn("A Numpy bug may make char arrays wrong in matlab. To fix A, use A=reshape(A, size(A,2), size(A,1))'")
tasks=[# ("TEST", False, False, False, False, False, False),
# ("TEST_C", True, False, False, False, False, False),
# ("TEST_OB", False, True, False, False, False, False),
# ("TEST_D", False, False, True, False, False, False),
# ("TEST_DE", False, False, False, True, False, False),
# ("TEST_DO", False, False, False, False, True, False),
# ("TEST_IN", False, False, False, False, False, True),
("TEST_C_D_DO", True, False, True, False, True, False),
("TEST_D_DO", False, False, True, False, True, False),
]
if parallel:
pool_size=min((mp.cpu_count() - 1, len(tasks), 5))
p=mp.Pool(max(2,pool_size))
folders=p.map(DoTask, tasks)
else:
folders=[DoTask(task) for task in tasks]
for folder in folders:
os.system("mv {0}/*.mat {1}".format(folder, dat_dest))
os.system("cp {0}/*.db3 {1}".format(folder, dat_dest))
| excelly/xpy-ml | omop/simu_flow.py | Python | apache-2.0 | 2,109 | 0.009483 |
# -*- coding: utf-8 -*-
# Gedit Better Defaults plugin
# Copyright (C) 2017 Fabio Zendhi Nagao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gedit
import gtk
import re
ui_str = """
<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_4">
<menuitem action="DuplicateLine" name="Duplicate line"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
class BetterDefaultsWindowHelper:
def __init__(self, plugin, window):
self._window = window
self._plugin = plugin
self.install_ui()
for view in self._window.get_views():
self.activate_view(view)
for doc in self._window.get_documents():
self.activate_doc(doc)
self._tab_added_id = self._window.connect("tab_added", self.on_tab_added)
# self._key_press_id = self._window.connect("key-press-event", self.on_key_press_event)
def deactivate(self):
# self._window.disconnect(self._key_press_id)
self._window.disconnect(self._tab_added_id)
for doc in self._window.get_documents():
self.deactivate_doc(doc)
for view in self._window.get_views():
self.deactivate_view(view)
self.uninstall_ui()
self._window = None
self._plugin = None
def update_ui(self):
pass
# # TODO: Use key press and button press events instead of update_ui
# doc = self._window.get_active_document()
# if doc:
# bounds = doc.get_selection_bounds()
# if bounds:
# content = doc.get_text(*bounds).decode("utf-8")
# highlightable = re.compile(r"[\S\{\}\[\]\(\)]+", flags=re.UNICODE)
# if highlightable.search(content):
# doc.set_search_text(content, gedit.SEARCH_CASE_SENSITIVE)
# else:
# doc.set_search_text("", gedit.SEARCH_CASE_SENSITIVE)
# else:
# doc.set_search_text("", gedit.SEARCH_CASE_SENSITIVE)
def install_ui(self):
manager = self._window.get_ui_manager()
self._action_group = gtk.ActionGroup("BetterDefaultsPluginActions")
self._action_group.add_actions([
( "DuplicateLine", None, _("Duplicate line"), "<Ctrl><Shift>d", _("Duplicate Line"), self.duplicate_line )
])
manager.insert_action_group(self._action_group, -1)
self._ui_id = manager.add_ui_from_string(ui_str)
def uninstall_ui(self):
manager = self._window.get_ui_manager()
manager.remove_ui(self._ui_id)
manager.remove_action_group(self._action_group)
manager.ensure_update()
def activate_view(self, view):
view.set_smart_home_end(True)
view.set_data("vscrolling_helper", (0.0, 0.0))
size_allocate_id = view.connect("size-allocate", self.on_size_allocate)
view.set_data("on_size_allocate_id", size_allocate_id)
va = view.get_vadjustment()
value_change_id = va.connect("value_changed", self.on_value_changed)
view.set_data("on_value_changed_id", value_change_id)
def deactivate_view(self, view):
va = view.get_vadjustment()
va.disconnect( view.get_data("on_value_changed_id") )
view.disconnect( view.get_data("on_size_allocate_id") )
view.set_smart_home_end(False)
def activate_doc(self, doc):
save_id = doc.connect("save", self.on_document_save)
doc.set_data("on_save_id", save_id)
def deactivate_doc(self, doc):
        doc.disconnect( doc.get_data("on_save_id") )
def on_tab_added(self, w, t):
self.activate_view(t.get_view())
self.activate_doc(t.get_document())
def on_document_save(self, doc):
piter = doc.get_end_iter()
if piter.starts_line():
while piter.backward_char():
if not piter.ends_line():
piter.forward_to_line_end()
break
doc.delete(piter, doc.get_end_iter())
def on_size_allocate(self, view, allocation):
va = view.get_vadjustment()
vsz = va.get_upper() + ( va.get_page_size() / 2 )
if va.get_upper() > va.get_page_size():
va.set_upper(vsz)
if va.get_value() < view.get_data("vscrolling_helper")[1]:
va.set_value(view.get_data("vscrolling_helper")[1])
view.set_data("vscrolling_helper", (vsz, va.get_value()))
def on_value_changed(self, adjustment):
view = self._window.get_active_view()
va = view.get_vadjustment()
if( va.get_upper() == view.get_data("vscrolling_helper")[0] ):
view.set_data( "vscrolling_helper", ( view.get_data("vscrolling_helper")[0], va.get_value() ) )
def duplicate_line(self, action):
doc = self._window.get_active_document()
doc.begin_user_action()
liter = doc.get_iter_at_mark(doc.get_insert())
liter.set_line_offset(0);
riter = doc.get_iter_at_mark(doc.get_insert())
f = riter.forward_line()
line = doc.get_slice(liter, riter, True)
if f:
doc.insert(riter, line)
else:
doc.insert(riter, '\n' + line)
doc.end_user_action()
def enclose_selected(self, l, r):
doc = self._window.get_active_document()
(a, b) = doc.get_selection_bounds()
doc.insert(b, r)
(a, b) = doc.get_selection_bounds()
doc.insert(a, l)
def on_key_press_event(self, window, event):
doc = self._window.get_active_document()
bounds = doc.get_selection_bounds()
if bounds:
c = event.keyval
if c == 123:
self.enclose_selected('{', '}')
elif c == 91:
self.enclose_selected('[', ']')
elif c == 40:
self.enclose_selected('(', ')')
elif c == 60:
self.enclose_selected('<', '>')
elif c == 65111:
self.enclose_selected('"', '"')
elif c == 65105:
self.enclose_selected("'", "'")
if c in [123, 91, 40, 60, 65111, 65105]:
return True
class BetterDefaultsPlugin(gedit.Plugin):
WINDOW_DATA_KEY = "BetterDefaultsPluginWindowData"
def __init__(self):
gedit.Plugin.__init__(self)
def activate(self, window):
helper = BetterDefaultsWindowHelper(self, window)
window.set_data(self.WINDOW_DATA_KEY, helper)
def deactivate(self, window):
window.get_data(self.WINDOW_DATA_KEY).deactivate()
window.set_data(self.WINDOW_DATA_KEY, None)
def update_ui(self, window):
window.get_data(self.WINDOW_DATA_KEY).update_ui()
| nagaozen/my-os-customizations | home/nagaozen/.gnome2/gedit/plugins/better-defaults/__init__.py | Python | gpl-3.0 | 6,370 | 0.027002 |
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
# Modules pins and description
M1A = session.query(Module).filter(Module.name == 'M1A').first()
M1B = session.query(Module).filter(Module.name == 'M1B').first()
M1C = session.query(Module).filter(Module.name == 'M1C').first()
M2A = session.query(Module).filter(Module.name == 'M2A').first()
M2B = session.query(Module).filter(Module.name == 'M2B').first()
M2C = session.query(Module).filter(Module.name == 'M2C').first()
M3A = session.query(Module).filter(Module.name == 'M3A').first()
M3B = session.query(Module).filter(Module.name == 'M3B').first()
M3C = session.query(Module).filter(Module.name == 'M3C').first()
M4A = session.query(Module).filter(Module.name == 'M4A').first()
M4B = session.query(Module).filter(Module.name == 'M4B').first()
M4C = session.query(Module).filter(Module.name == 'M4C').first()
M5A = session.query(Module).filter(Module.name == 'M5A').first()
M5B = session.query(Module).filter(Module.name == 'M5B').first()
M5C = session.query(Module).filter(Module.name == 'M5C').first()
M6A = session.query(Module).filter(Module.name == 'M6A').first()
M6B = session.query(Module).filter(Module.name == 'M6B').first()
M6C = session.query(Module).filter(Module.name == 'M6C').first()
M7A = session.query(Module).filter(Module.name == 'M7A').first()
M7B = session.query(Module).filter(Module.name == 'M7B').first()
M7C = session.query(Module).filter(Module.name == 'M7C').first()
# Startup input BCM pins
input_pins = [0, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25]
# Startup output BCM pins
output_pins = [26, 27]
def main():
# Set up GPIO using BCM numbering
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(26, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(27, GPIO.OUT, initial=GPIO.LOW)
def modo0():
for pin in input_pins:
try:
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
except:
            print u"Error activating BCM pin %s" % pin
stdout.flush()
for pin in output_pins:
try:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)
except:
            print u"Error activating BCM pin %s" % pin
stdout.flush()
return(True)
def modo1():
global M1A, M1B, M1C
global M2A, M2B, M2C
global M3A, M3B, M3C
global M4A, M4B, M4C
global M5A, M5B, M5C
global M6A, M6B, M6C
global M7A, M7B, M7C
try:
GPIO.output(26, GPIO.HIGH)
except:
        print u'Error setting the level of BCM pin 26'
try:
GPIO.output(27, GPIO.LOW)
except:
        print u'Error setting the level of BCM pin 27'
sleep(5)
discovery_mods(M1A, M1B, M1C)
discovery_mods(M2A, M2B, M2C)
discovery_mods(M3A, M3B, M3C)
discovery_mods(M4A, M4B, M4C)
discovery_mods(M5A, M5B, M5C)
discovery_mods(M6A, M6B, M6C)
discovery_mods(M7A, M7B, M7C)
def modo3():
try:
GPIO.output(26, GPIO.HIGH)
except:
        print u'Error setting the level of BCM pin 26'
try:
GPIO.output(27, GPIO.HIGH)
except:
        print u'Error setting the level of BCM pin 27'
return True
def switch_on(_M):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
GPIO.setup(_M.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(_M.gpio, GPIO.HIGH)
_M.status = True
session.commit()
else:
        print 'ERROR! This pin is set as an input'
def switch_off(_M):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
GPIO.setup(_M.gpio, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(_M.gpio, GPIO.LOW)
_M.status = False
session.commit()
else:
        print 'ERROR! This pin is set as an input'
def reset_pin(_M, _time):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.gpio_function(_M.gpio) == 0:
switch_on(_M)
sleep(_time)
switch_off(_M)
else:
        print 'ERROR! This pin is set as an input'
def softreset(_host):
from subprocess import call
call(["net", "rpc", "shutdown", "-r", "-I", "192.168.1.21", "-U", "Administrador%SemParar"])
def discovery_mods(_MA, _MB, _MC):
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if GPIO.input(_MA.gpio) == 0 and GPIO.input(_MB.gpio) == 1:
GPIO.setup(_MA.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MB.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MC.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
_MA.io_type = 'input'
_MA.rpull = False
_MB.io_type = 'input'
_MB.rpull = False
_MC.io_type = 'input'
_MC.rpull = False
session.commit()
elif GPIO.input(_MA.gpio) == 1 and GPIO.input(_MB.gpio) == 0:
GPIO.setup(_MA.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(_MB.gpio, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(_MC.gpio, GPIO.OUT, initial=GPIO.LOW)
_MA.io_type = 'output'
_MA.status = False
_MB.io_type = 'output'
_MB.status = False
_MC.io_type = 'output'
_MC.status = False
session.commit()
else:
GPIO.setup(_MA.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MB.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(_MC.gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
_MA.io_type = 'empty'
_MA.rpull = False
_MB.io_type = 'empty'
_MB.rpull = False
_MC.io_type = 'empty'
_MC.rpull = False
session.commit()
def cleanup_pins():
import RPi.GPIO as GPIO
from models import session
from models import Device
from models import Module
from models import Event_Log
from time import sleep
from sys import stdout
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.cleanup()
if __name__ == "__main__":
main()
| TemosEngenharia/RPI-IO | RPi_IO/rpi_io.py | Python | agpl-3.0 | 6,968 | 0.004167 |
#!/usr/bin/python3
#
# examples/xfixes-selection-notify.py -- demonstrate the XFIXES extension
# SelectionNotify event.
#
# Copyright (C) 2019
# Tony Crisci <tony@dubstepdish.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python 2/3 compatibility.
from __future__ import print_function
import sys
import os
import time
# Change path so we find Xlib
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Xlib.display import Display
from Xlib.ext import xfixes
def main(argv):
if len(sys.argv) != 2:
sys.exit('usage: {0} SELECTION\n\n'
'SELECTION is typically PRIMARY, SECONDARY or CLIPBOARD.\n'
.format(sys.argv[0]))
display = Display()
sel_name = sys.argv[1]
sel_atom = display.get_atom(sel_name)
if not display.has_extension('XFIXES'):
if display.query_extension('XFIXES') is None:
print('XFIXES extension not supported', file=sys.stderr)
return 1
xfixes_version = display.xfixes_query_version()
print('Found XFIXES version %s.%s' % (
xfixes_version.major_version,
xfixes_version.minor_version,
), file=sys.stderr)
screen = display.screen()
mask = xfixes.XFixesSetSelectionOwnerNotifyMask | \
xfixes.XFixesSelectionWindowDestroyNotifyMask | \
xfixes.XFixesSelectionClientCloseNotifyMask
display.xfixes_select_selection_input(screen.root, sel_atom, mask)
while True:
e = display.next_event()
print(e)
if (e.type, e.sub_code) == display.extension_event.SetSelectionOwnerNotify:
print('SetSelectionOwner: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionWindowDestroyNotify:
print('SelectionWindowDestroy: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionClientCloseNotify:
print('SelectionClientClose: owner=0x{0:08x}'.format(e.owner.id))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| python-xlib/python-xlib | examples/xfixes-selection-notify.py | Python | lgpl-2.1 | 2,764 | 0.002894 |
# 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.dellos6 import dellos6_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = self.load_provider()
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'dellos6'
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
rc, out, err = connection.exec_command('prompt()')
while str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
rc, out, err = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(tmp, task_vars)
return result
def load_provider(self):
provider = self._task.args.get('provider', {})
for key, value in iteritems(dellos6_argument_spec):
if key != 'provider' and key not in provider:
if key in self._task.args:
provider[key] = self._task.args[key]
elif 'fallback' in value:
provider[key] = self._fallback(value['fallback'])
elif key not in provider:
provider[key] = None
return provider
def _fallback(self, fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except AnsibleFallbackNotFound:
pass
| qrkourier/ansible | lib/ansible/plugins/action/dellos6.py | Python | gpl-3.0 | 4,351 | 0.001379 |
## @file Test_Reverse_Transform.py
#
# This script is a simple implementation of the reverse ODSA transform algorithm
## reverse_jumps function
#
# apply the reverse jumps to the elaborated output
def reverse_jumps(letter, index, input_text):
output = input_text
for x in xrange(0, len(letter)):
output = output[:output.index(letter[x])] + output[output.index(letter[x])+1:]
output = output[:index[x]]+[letter[x]]+output[index[x]:]
return output
## elloborate function
#
# elaborate (expand) the output text from the letter/index output map
def elloborate(letter, index):
output = []
for x in xrange(0, len(letter)):
for y in xrange(index[x],index[x+1]):
output += letter[x]
return output
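# Worked example: elloborate() performs a run-length style expansion.
# With letter = ['a', 'b'] and index = [0, 3, 5] it emits 'a' for positions
# 0..2 and 'b' for positions 3..4, giving ['a', 'a', 'a', 'b', 'b'].
# reverse_jumps() then removes each transform letter from that list and
# re-inserts it at its recorded index, undoing the forward transform's jumps.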
#main program
fo = open('../Test/Transform.kt','rb')
lines = []
for line in fo:
lines.append(line.strip())
fo.close()
fo = open('../Test/TestOutput.txt','wb')
fo.write('')
fo.close()
inSize = 0
outSize = 0
len_letter_map = 0
for x in xrange(0,len(lines)/4):
letter_map = lines[4*x].split(' ')
index_map = map(int, lines[4*x+1].split(' '))
letter_transform = lines[4*x+2].split(' ')
index_transform = map(int, lines[4*x+3].split(' '))
output = elloborate(letter_map, index_map)
output = reverse_jumps(letter_transform, index_transform, output)
fo = open('../Test/TestOutput.txt','a')
fo.write(''.join(output))
fo.close()
inSize += len(output)
outSize += len(letter_map)+len(letter_transform)+len(index_map)+len(index_transform)
len_letter_map += len(letter_map)
if x == (len(lines)/4)-1 and not len(lines)%4 == 0:
letter_map = lines[4*(x+1)].split(' ')
index_map = map(int, lines[4*(x+1)+1].split(' '))
output = elloborate(letter_map, index_map)
fo = open('../Test/TestOutput.txt','a')
fo.write(''.join(output))
fo.close()
inSize += len(output)
outSize += len(letter_map)+len(letter_transform)+len(index_map)+len(index_transform)
len_letter_map += len(letter_map)
print 'Input size =', inSize, ' bytes.'
print 'Output size =', outSize, ' bytes.'
print 'Actual file size =', ((outSize*2)+len_letter_map+4), ' bytes'
print 'Efficiency =', (100 - (outSize)*100/inSize), '%'
print 'Actual efficiency =', (100 - ((outSize*2)+len_letter_map+4)*100/inSize), '%'
| melvinodsa/odsatransform | Code/Test_Reverse_Transform.py | Python | apache-2.0 | 2,316 | 0.007772 |
# Easy to use system logging for Python's logging module.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: December 10, 2020
# URL: https://coloredlogs.readthedocs.io
"""
Easy to use UNIX system logging for Python's :mod:`logging` module.
Admittedly system logging has little to do with colored terminal output, however:
- The `coloredlogs` package is my attempt to do Python logging right and system
logging is an important part of that equation.
- I've seen a surprising number of quirks and mistakes in system logging done
in Python, for example including ``%(asctime)s`` in a format string (the
system logging daemon is responsible for adding timestamps and thus you end
up with duplicate timestamps that make the logs awful to read :-).
- The ``%(programname)s`` filter originated in my system logging code and I
wanted it in `coloredlogs` so the step to include this module wasn't that big.
- As a bonus this Python module now has a test suite and proper documentation.
So there :-P. Go take a look at :func:`enable_system_logging()`.
"""
# Standard library modules.
import logging
import logging.handlers
import os
import socket
import sys
# External dependencies.
from humanfriendly import coerce_boolean
from humanfriendly.compat import on_macos, on_windows
# Modules included in our package.
from coloredlogs import (
DEFAULT_LOG_LEVEL,
ProgramNameFilter,
adjust_level,
find_program_name,
level_to_number,
replace_handler,
)
LOG_DEVICE_MACOSX = '/var/run/syslog'
"""The pathname of the log device on Mac OS X (a string)."""
LOG_DEVICE_UNIX = '/dev/log'
"""The pathname of the log device on Linux and most other UNIX systems (a string)."""
DEFAULT_LOG_FORMAT = '%(programname)s[%(process)d]: %(levelname)s %(message)s'
"""
The default format for log messages sent to the system log (a string).
The ``%(programname)s`` format requires :class:`~coloredlogs.ProgramNameFilter`
but :func:`enable_system_logging()` takes care of this for you.
The ``name[pid]:`` construct (specifically the colon) in the format allows
rsyslogd_ to extract the ``$programname`` from each log message, which in turn
allows configuration files in ``/etc/rsyslog.d/*.conf`` to filter these log
messages to a separate log file (if the need arises).
.. _rsyslogd: https://en.wikipedia.org/wiki/Rsyslog
"""
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class SystemLogging(object):
"""Context manager to enable system logging."""
def __init__(self, *args, **kw):
"""
Initialize a :class:`SystemLogging` object.
:param args: Positional arguments to :func:`enable_system_logging()`.
:param kw: Keyword arguments to :func:`enable_system_logging()`.
"""
self.args = args
self.kw = kw
self.handler = None
def __enter__(self):
"""Enable system logging when entering the context."""
if self.handler is None:
self.handler = enable_system_logging(*self.args, **self.kw)
return self.handler
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
"""
Disable system logging when leaving the context.
.. note:: If an exception is being handled when we leave the context a
warning message including traceback is logged *before* system
logging is disabled.
"""
if self.handler is not None:
if exc_type is not None:
logger.warning("Disabling system logging due to unhandled exception!", exc_info=True)
(self.kw.get('logger') or logging.getLogger()).removeHandler(self.handler)
self.handler = None
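# Typical use of the context manager above, assuming a program called
# 'my-program'; the handler is installed on entry and removed again on exit:
#
#   import logging
#   from coloredlogs.syslog import SystemLogging
#
#   with SystemLogging(programname='my-program'):
#       logging.info("this message is also sent to the system log")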
def enable_system_logging(programname=None, fmt=None, logger=None, reconfigure=True, **kw):
"""
Redirect :mod:`logging` messages to the system log (e.g. ``/var/log/syslog``).
:param programname: The program name to embed in log messages (a string, defaults
to the result of :func:`~coloredlogs.find_program_name()`).
:param fmt: The log format for system log messages (a string, defaults to
:data:`DEFAULT_LOG_FORMAT`).
:param logger: The logger to which the :class:`~logging.handlers.SysLogHandler`
should be connected (defaults to the root logger).
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:param reconfigure: If :data:`True` (the default) multiple calls to
:func:`enable_system_logging()` will each override
the previous configuration.
:param kw: Refer to :func:`connect_to_syslog()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or
:data:`None`. If an existing handler is found and `reconfigure`
is :data:`False` the existing handler object is returned. If the
connection to the system logging daemon fails :data:`None` is
returned.
As of release 15.0 this function uses :func:`is_syslog_supported()` to
check whether system logging is supported and appropriate before it's
enabled.
.. note:: When the logger's effective level is too restrictive it is
relaxed (refer to `notes about log levels`_ for details).
"""
# Check whether system logging is supported / appropriate.
if not is_syslog_supported():
return None
# Provide defaults for omitted arguments.
programname = programname or find_program_name()
logger = logger or logging.getLogger()
fmt = fmt or DEFAULT_LOG_FORMAT
level = level_to_number(kw.get('level', DEFAULT_LOG_LEVEL))
# Check whether system logging is already enabled.
handler, logger = replace_handler(logger, match_syslog_handler, reconfigure)
# Make sure reconfiguration is allowed or not relevant.
if not (handler and not reconfigure):
# Create a system logging handler.
handler = connect_to_syslog(**kw)
# Make sure the handler was successfully created.
if handler:
# Enable the use of %(programname)s.
ProgramNameFilter.install(handler=handler, fmt=fmt, programname=programname)
# Connect the formatter, handler and logger.
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
# Adjust the level of the selected logger.
adjust_level(logger, level)
return handler
def connect_to_syslog(address=None, facility=None, level=None):
"""
Create a :class:`~logging.handlers.SysLogHandler`.
:param address: The device file or network address of the system logging
daemon (a string or tuple, defaults to the result of
:func:`find_syslog_address()`).
:param facility: Refer to :class:`~logging.handlers.SysLogHandler`.
Defaults to ``LOG_USER``.
:param level: The logging level for the :class:`~logging.handlers.SysLogHandler`
(defaults to :data:`.DEFAULT_LOG_LEVEL`). This value is coerced
using :func:`~coloredlogs.level_to_number()`.
:returns: A :class:`~logging.handlers.SysLogHandler` object or :data:`None` (if the
system logging daemon is unavailable).
The process of connecting to the system logging daemon goes as follows:
- The following two socket types are tried (in decreasing preference):
1. :data:`~socket.SOCK_RAW` avoids truncation of log messages but may
not be supported.
2. :data:`~socket.SOCK_STREAM` (TCP) supports longer messages than the
default (which is UDP).
"""
if not address:
address = find_syslog_address()
if facility is None:
facility = logging.handlers.SysLogHandler.LOG_USER
if level is None:
level = DEFAULT_LOG_LEVEL
for socktype in socket.SOCK_RAW, socket.SOCK_STREAM, None:
kw = dict(facility=facility, address=address)
if socktype is not None:
kw['socktype'] = socktype
try:
handler = logging.handlers.SysLogHandler(**kw)
except IOError:
# IOError is a superclass of socket.error which can be raised if the system
# logging daemon is unavailable.
pass
else:
handler.setLevel(level_to_number(level))
return handler
def find_syslog_address():
"""
Find the most suitable destination for system log messages.
:returns: The pathname of a log device (a string) or an address/port tuple as
supported by :class:`~logging.handlers.SysLogHandler`.
On Mac OS X this prefers :data:`LOG_DEVICE_MACOSX`, after that :data:`LOG_DEVICE_UNIX`
is checked for existence. If both of these device files don't exist the default used
by :class:`~logging.handlers.SysLogHandler` is returned.
"""
if sys.platform == 'darwin' and os.path.exists(LOG_DEVICE_MACOSX):
return LOG_DEVICE_MACOSX
elif os.path.exists(LOG_DEVICE_UNIX):
return LOG_DEVICE_UNIX
else:
return 'localhost', logging.handlers.SYSLOG_UDP_PORT
def is_syslog_supported():
"""
Determine whether system logging is supported.
:returns:
:data:`True` if system logging is supported and can be enabled,
:data:`False` if system logging is not supported or there are good
reasons for not enabling it.
The decision making process here is as follows:
Override
If the environment variable ``$COLOREDLOGS_SYSLOG`` is set it is evaluated
using :func:`~humanfriendly.coerce_boolean()` and the resulting value
overrides the platform detection discussed below, this allows users to
override the decision making process if they disagree / know better.
Linux / UNIX
On systems that are not Windows or MacOS (see below) we assume UNIX which
means either syslog is available or sending a bunch of UDP packets to
nowhere won't hurt anyone...
Microsoft Windows
Over the years I've had multiple reports of :pypi:`coloredlogs` spewing
extremely verbose errno 10057 warning messages to the console (once for
each log message I suppose) so I now assume it a default that
"syslog-style system logging" is not generally available on Windows.
Apple MacOS
There's cPython issue `#38780`_ which seems to result in a fatal exception
when the Python interpreter shuts down. This is (way) worse than not
having system logging enabled. The error message mentioned in `#38780`_
has actually been following me around for years now, see for example:
- https://github.com/xolox/python-rotate-backups/issues/9 mentions Docker
images implying Linux, so not strictly the same as `#38780`_.
- https://github.com/xolox/python-npm-accel/issues/4 is definitely related
to `#38780`_ and is what eventually prompted me to add the
:func:`is_syslog_supported()` logic.
.. _#38780: https://bugs.python.org/issue38780
"""
override = os.environ.get("COLOREDLOGS_SYSLOG")
if override is not None:
return coerce_boolean(override)
else:
return not (on_windows() or on_macos())
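# The override described above can be exercised from the environment, e.g.
# running `COLOREDLOGS_SYSLOG=false python your-script.py` in a POSIX shell
# forces system logging off regardless of the platform checks (the script
# name here is just a placeholder).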
def match_syslog_handler(handler):
"""
Identify system logging handlers.
:param handler: The :class:`~logging.Handler` class to check.
:returns: :data:`True` if the handler is a
:class:`~logging.handlers.SysLogHandler`,
:data:`False` otherwise.
This function can be used as a callback for :func:`.find_handler()`.
"""
return isinstance(handler, logging.handlers.SysLogHandler)
| xolox/python-coloredlogs | coloredlogs/syslog.py | Python | mit | 11,849 | 0.001772 |
#
# Copyright 2008 Huang Ying <huang.ying.caritas@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import os
import tempfile
import Image, ImageFilter
import config
from util import *
class PageImageRef(object):
def __init__(self, page_num, sub_page_num = 0,
image = None, file_name = None):
object.__init__(self)
self.page_num = page_num
self.sub_page_num = sub_page_num
self.image = image
self.file_name = file_name
def __del__(self):
if self.file_name and not config.debug:
os.unlink(self.file_name)
def clear(self):
self.file_name = None
self.image = None
def derive(self, image = None, file_name = None):
return PageImageRef(self.page_num, self.sub_page_num,
image, file_name)
def get_image(self):
if self.image:
return self.image
elif self.file_name:
self.image = Image.open(self.file_name)
return self.image
def get_file_name(self, ext = 'pgm'):
if self.file_name:
return self.file_name
self.file_name = temp_file_name('.'+ext)
if self.image:
self.image.save(self.file_name)
return self.file_name
class NullConv(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
return pimg_ref
class PreCrop(object):
def __init__(self, config):
object.__init__(self)
self.trim_left = config.trim_left
self.trim_top = config.trim_top
self.trim_right = config.trim_right
self.trim_bottom = config.trim_bottom
def convert(self, pimg_ref, out_file_name = None):
if self.trim_left < 0.01 and self.trim_top < 0.01 and \
self.trim_right < 0.01 and self.trim_bottom < 0.01:
return pimg_ref
img = pimg_ref.get_image()
iw, ih = img.size
left = nround(self.trim_left * iw)
right = iw - nround(self.trim_right * iw)
top = nround(self.trim_top * ih)
bottom = ih - nround(self.trim_bottom * ih)
img = img.crop((left, top, right, bottom))
return pimg_ref.derive(img)
class Dilate(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
img = pimg_ref.get_image()
img = img.filter(ImageFilter.MinFilter(3))
return pimg_ref.derive(img)
def create_dilate(config):
if config.dilate:
return Dilate(config)
else:
return NullConv(config)
class Unpaper(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
if out_file_name is None:
out_file_name = temp_file_name('.pgm')
check_call(['unpaper', '-q', '--no-deskew',
pimg_ref.get_file_name(), out_file_name])
return pimg_ref.derive(file_name = out_file_name)
class RowCondense(object):
def __init__(self, config):
object.__init__(self)
self.unpaper_keep_size = config.unpaper_keep_size
def convert(self, pimg_ref, out_file_name = None):
img = pimg_ref.get_image()
iw, ih = img.size
ethr = max(ih/500, 1)
def not_empty(h):
return sum(h[:-32]) > ethr
top = 0
bottom = ih
left = -1
right = iw
for x in range(1, iw+1):
ir = img.crop((x - 1, 0, x, ih))
if not_empty(ir.histogram()):
left = x - 1
break
if left == -1:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, 2, ih))
return pimg_ref.derive(nimg)
for x in range(left, iw-1, -1):
ir = img.crop((x, 0, x+1, ih))
if not_empty(ir.histogram()):
right = x+1
break
rows = []
pe = True
for y in range(1, ih+1):
ic = img.crop((left, y-1, right, y))
ce = not not_empty(ic.histogram())
if pe != ce:
rows.append(y-1)
pe = ce
if not pe:
rows.append(ih)
if len(rows) == 0:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, 2, ih))
return pimg_ref.derive(nimg)
minh_empty = max(ih / 100, 5)
for i in range(len(rows)-3, 1, -2):
if rows[i+1] - rows[i] < minh_empty:
del rows[i+1]
del rows[i]
minh_ink = max(ih / 100, 5)
nh = 0
for i in range(0, len(rows) - 2, 2):
inkh = rows[i+1] - rows[i]
ninkh = rows[i+3] - rows[i+2]
nh = nh + inkh
if inkh < minh_ink or ninkh < minh_ink:
nh = nh + minh_empty
else:
nh = nh + rows[i+2] - rows[i+1]
nh += rows[-1] - rows[-2]
nw = right - left
if self.unpaper_keep_size:
nw, nh = iw, ih
nimg = Image.new("L", (nw, nh))
nimg.paste(255, [0, 0, nw, nh])
else:
nimg = Image.new("L", (nw, nh))
cy = 0
for i in range(0, len(rows) - 2, 2):
inkh = rows[i+1] - rows[i]
ninkh = rows[i+3] - rows[i+2]
nimg.paste(img.crop((left, rows[i], right, rows[i+1])), (0, cy))
cy = cy + inkh
if inkh < minh_ink or ninkh < minh_ink:
eh = minh_empty
else:
eh = rows[i+2] - rows[i+1]
nimg.paste(255, (0, cy, nw, cy + eh))
cy = cy + eh
nimg.paste(img.crop((left, rows[-2], right, rows[-1])), (0, cy))
return pimg_ref.derive(nimg)
class ColumnCondense(object):
def __init__(self, config):
object.__init__(self)
self.unpaper_keep_size = config.unpaper_keep_size
def convert(self, pimg_ref, out_file_name = None):
img = pimg_ref.get_image()
iw, ih = img.size
ethr = max(iw/500, 1)
def not_empty(h):
return sum(h[:-32]) > ethr
top = -1
bottom = ih
left = 0
right = iw
for y in range(1, ih+1):
ir = img.crop((0, y - 1, iw, y))
if not_empty(ir.histogram()):
top = y - 1
break
if top == -1:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, iw, 2))
return pimg_ref.derive(nimg)
for y in range(ih-1, top, -1):
ir = img.crop((0, y, iw, y+1))
if not_empty(ir.histogram()):
bottom = y+1
break
cols = []
pe = True
for x in range(1, iw+1):
ic = img.crop((x-1, top, x, bottom))
ce = not not_empty(ic.histogram())
if pe != ce:
cols.append(x-1)
pe = ce
if not pe:
cols.append(iw)
if len(cols) == 0:
if self.unpaper_keep_size:
nimg = img
else:
nimg = img.crop((0, 0, iw, 2))
return pimg_ref.derive(nimg)
minw_empty = max(iw / 100, 5)
for i in range(len(cols)-3, 1, -2):
if cols[i+1] - cols[i] < minw_empty:
del cols[i+1]
del cols[i]
minw_ink = max(iw / 100, 5)
nw = 0
for i in range(0, len(cols) - 2, 2):
inkw = cols[i+1] - cols[i]
ninkw = cols[i+3] - cols[i+2]
nw = nw + inkw
if inkw < minw_ink or ninkw < minw_ink:
nw = nw + minw_empty
else:
nw = nw + cols[i+2] - cols[i+1]
nw += cols[-1] - cols[-2]
nh = bottom - top
if self.unpaper_keep_size:
nw, nh = iw, ih
nimg = Image.new("L", (nw, nh))
nimg.paste(255, [0, 0, nw, nh])
else:
nimg = Image.new("L", (nw, nh))
cx = 0
for i in range(0, len(cols) - 2, 2):
inkw = cols[i+1] - cols[i]
ninkw = cols[i+3] - cols[i+2]
nimg.paste(img.crop((cols[i], top, cols[i+1], bottom)),
(cx, 0))
cx = cx + inkw
if inkw < minw_ink or ninkw < minw_ink:
ew = minw_empty
else:
ew = cols[i+2] - cols[i+1]
nimg.paste(255, (cx, 0, cx + ew, nh))
cx = cx + ew
nimg.paste(img.crop((cols[-2], top, cols[-1], bottom)),
(cx, 0))
return pimg_ref.derive(nimg)
class RowColumnCondense(object):
def __init__(self, config):
object.__init__(self)
self.rc = RowCondense(config)
self.cc = ColumnCondense(config)
def convert(self, pimg_ref, out_file_name = None):
pimg_ref = self.cc.convert(pimg_ref)
return self.rc.convert(pimg_ref)
def create_unpaper(config):
if config.unpaper == 'cc':
return ColumnCondense(config)
elif config.unpaper == 'rc':
return RowCondense(config)
elif config.unpaper == 'rcc':
return RowColumnCondense(config)
elif config.unpaper == 'up':
return Unpaper(config)
else:
return NullConv(config)
class PostProc(object):
def __init__(self, config):
object.__init__(self)
self.colors = config.colors
self.rotate = config.rotate
self.gamma = config.gamma
def convert(self, pimg_ref, out_file_name = None):
def do_gamma(infn):
gmfn = temp_file_name('.png')
cmd = ['convert', '-gamma', '%.2f' % (self.gamma,),
'-depth', '8', infn, gmfn]
check_call(cmd)
return gmfn
if self.gamma > 0.001:
gmfn = do_gamma(pimg_ref.get_file_name())
pimg_ref = pimg_ref.derive(file_name = gmfn)
if out_file_name is None:
out_file_name = temp_file_name('.png')
proc = False
cmd = ['convert']
if self.colors < 256:
scolors = '%d' % (self.colors,)
cmd.extend(['-colors', scolors])
proc = True
if self.rotate:
cmd.extend(['-rotate', '-90'])
proc = True
if not proc:
return pimg_ref
cmd.extend(['-depth', '8', pimg_ref.get_file_name(), out_file_name])
check_call(cmd)
return pimg_ref.derive(file_name = out_file_name)
class FixBlackWhite(object):
def __init__(self, config):
object.__init__(self)
def convert(self, pimg_ref, out_file_name = None):
def fix(p):
if p < 10:
return 0
if p > 245:
return 255
return p
if out_file_name is None:
out_file_name = temp_file_name('.png')
img = pimg_ref.get_image()
img = img.point(fix)
return pimg_ref.derive(img)
class Collector(object):
def __init__(self, config):
object.__init__(self)
self.page_map = {}
self.out_files = []
self.output_prefix = "%s/out" % (config.tmp_dir,)
self.first_page = config.first_page
self.last_page = config.last_page
def collect(self, pimg_ref):
in_file_name = pimg_ref.get_file_name('png')
ext = in_file_name[-3:]
pn = pimg_ref.page_num
out_file_name = '%s-%06d-%02d.%s' % (self.output_prefix,
pn, pimg_ref.sub_page_num, ext)
os.rename(in_file_name, out_file_name)
pimg_ref.clear()
self.out_files.append(out_file_name)
if not self.page_map.has_key(pn):
self.page_map[pn] = len(self.out_files)
def end(self):
pm = self.page_map
nopn = len(self.out_files)
for pn in range(self.last_page, self.first_page, -1):
if pm.has_key(pn):
nopn = pm[pn]
else:
pm[pn] = nopn
| hying-caritas/ibsuite | ibpy/ibpy/image.py | Python | gpl-2.0 | 12,386 | 0.00549 |