| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
from PyQt4.QtCore import QObject, pyqtSignal
from ems.notification.abstract import FormNotifier
from ems.qt4.gui.widgets.balloontip import BalloonTip
from ems import qt4
from ems.qt4.util import variant_to_pyobject as py
class BalloonFormNotifier(FormNotifier):
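    """Shows validation messages as balloon tips on mapped form widgets.
    Field names are mapped to widgets via map()/mapAll(); messages can be set
    directly (showMessage/clearMessages) or driven by a model that emits
    messageChanged/messagesCleared signals (setModel/setCurrentModelRow)."""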
def __init__(self):
self._widgetMap = {}
self._balloons = {}
self._defaultState = BalloonTip.ERROR
self._model = None
self._currentModelRow = 0
def map(self, key, widget):
self._widgetMap[key] = widget
self._balloons[key] = BalloonTip(widget)
self._balloons[key].setArrowAtLeft(True)
self._balloons[key].setArrowAtTop(False)
def mapAll(self, widgetDict):
for fieldName in widgetDict:
self.map(fieldName, widgetDict[fieldName])
def showMessage(self, key, message, state=None):
state = self._defaultState if state is None else state
        if key not in self._balloons:
return
if not len(message):
self._balloons[key].setMessage(message)
self._balloons[key].hide()
return
self._balloons[key].setState(state)
self._balloons[key].setMessage(message)
self._balloons[key].show()
def clearMessages(self):
for key in self._balloons:
self._balloons[key].setMessage('')
self._balloons[key].hide()
def getModel(self):
return self._model
def setModel(self, model):
self._connectToModel(model)
self._model = model
self._updateMessagesFromModel()
model = property(getModel, setModel)
def getCurrentModelRow(self):
return self._currentModelRow
def setCurrentModelRow(self, row):
self._currentModelRow = row
self._updateMessagesFromModel()
currentModelRow = property(getCurrentModelRow, setCurrentModelRow)
def _connectToModel(self, model):
model.messageChanged.connect(self._onModelMessageChanged)
model.messagesCleared.connect(self._onModelMessageCleared)
def _onModelMessageChanged(self, row, column, message):
if row != self._currentModelRow:
return
keyName = py(self._model.index(row, column).data(qt4.ColumnNameRole))
self.showMessage(keyName, message)
def _onModelMessageCleared(self, row):
if row != self._currentModelRow:
return
self.clearMessages()
def _updateMessagesFromModel(self):
self.clearMessages()
row = self._currentModelRow
for column in range(self._model.columnCount()):
keyName = py(self._model.index(row, column).data(qt4.ColumnNameRole))
            self.showMessage(keyName, self._model.columnMessage(row, column))
| mtils/ems | ems/qt4/gui/notification/balloon_form_notifier.py | Python | mit | 2,764 | 0.002171 |
# domain_backup
#
# Copyright Andrew Bartlett <abartlet@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import os
import sys
import tarfile
import logging
import shutil
import tempfile
import samba
import tdb
import samba.getopt as options
from samba.samdb import SamDB, get_default_backend_store
import ldb
from samba.samba3 import libsmb_samba_internal as libsmb
from samba.samba3 import param as s3param
from samba.ntacls import backup_online, backup_restore, backup_offline
from samba.auth import system_session
from samba.join import DCJoinContext, join_clone, DCCloneAndRenameContext
from samba.dcerpc.security import dom_sid
from samba.netcmd import Option, CommandError
from samba.dcerpc import misc, security, drsblobs
from samba import Ldb
from . fsmo import cmd_fsmo_seize
from samba.provision import make_smbconf, DEFAULTSITE
from samba.upgradehelpers import update_krbtgt_account_password
from samba.remove_dc import remove_dc
from samba.provision import secretsdb_self_join
from samba.dbchecker import dbcheck
import re
from samba.provision import guess_names, determine_host_ip, determine_host_ip6
from samba.provision.sambadns import (fill_dns_data_partitions,
get_dnsadmins_sid,
get_domainguid)
from samba.tdb_util import tdb_copy
from samba.mdb_util import mdb_copy
import errno
from subprocess import CalledProcessError
from samba import sites
from samba.dsdb import _dsdb_load_udv_v2
from samba.ndr import ndr_pack
from samba.credentials import SMB_SIGNING_REQUIRED
# work out a SID (based on a free RID) to use when the domain gets restored.
# This ensures that the restored DC's SID won't clash with any other RIDs
# already in use in the domain
def get_sid_for_restore(samdb, logger):
# Find the DN of the RID set of the server
res = samdb.search(base=ldb.Dn(samdb, samdb.get_serverName()),
scope=ldb.SCOPE_BASE, attrs=["serverReference"])
server_ref_dn = ldb.Dn(samdb, str(res[0]['serverReference'][0]))
res = samdb.search(base=server_ref_dn,
scope=ldb.SCOPE_BASE,
attrs=['rIDSetReferences'])
rid_set_dn = ldb.Dn(samdb, str(res[0]['rIDSetReferences'][0]))
# Get the alloc pools and next RID of the RID set
res = samdb.search(base=rid_set_dn,
scope=ldb.SCOPE_SUBTREE,
expression="(rIDNextRID=*)",
attrs=['rIDAllocationPool',
'rIDPreviousAllocationPool',
'rIDNextRID'])
# Decode the bounds of the RID allocation pools
try:
rid = int(res[0].get('rIDNextRID')[0])
except IndexError:
logger.info("The RID pool for this DC is not initalized "
"(e.g. it may be a fairly new DC).")
logger.info("To initialize it, create a temporary user on this DC "
"(you can delete it later).")
raise CommandError("Cannot create backup - "
"please initialize this DC's RID pool first.")
def split_val(num):
high = (0xFFFFFFFF00000000 & int(num)) >> 32
low = 0x00000000FFFFFFFF & int(num)
return low, high
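    # Illustrative example (hypothetical values, not taken from a real DB):
    # a pool covering RIDs 1100-1599 is stored as the single 64-bit integer
    # (1599 << 32) | 1100, so split_val((1599 << 32) | 1100) == (1100, 1599).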
pool_l, pool_h = split_val(res[0].get('rIDPreviousAllocationPool')[0])
npool_l, npool_h = split_val(res[0].get('rIDAllocationPool')[0])
# Calculate next RID based on pool bounds
if rid == npool_h:
raise CommandError('Out of RIDs, finished AllocPool')
if rid == pool_h:
if pool_h == npool_h:
raise CommandError('Out of RIDs, finished PrevAllocPool.')
rid = npool_l
else:
rid += 1
# Construct full SID
sid = dom_sid(samdb.get_domain_sid())
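    # The result is the domain SID with the free RID appended, e.g. the
    # (purely illustrative) value 'S-1-5-21-3623811015-3361044348-30300820-1105'.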
return str(sid) + '-' + str(rid)
def smb_sysvol_conn(server, lp, creds):
"""Returns an SMB connection to the sysvol share on the DC"""
    # the SMB bindings rely on having an s3 loadparm
s3_lp = s3param.get_context()
s3_lp.load(lp.configfile)
# Force signing for the connection
saved_signing_state = creds.get_smb_signing()
creds.set_smb_signing(SMB_SIGNING_REQUIRED)
conn = libsmb.Conn(server, "sysvol", lp=s3_lp, creds=creds)
# Reset signing state
creds.set_smb_signing(saved_signing_state)
return conn
def get_timestamp():
return datetime.datetime.now().isoformat().replace(':', '-')
def backup_filepath(targetdir, name, time_str):
filename = 'samba-backup-%s-%s.tar.bz2' % (name, time_str)
return os.path.join(targetdir, filename)
def create_backup_tar(logger, tmpdir, backup_filepath):
# Adds everything in the tmpdir into a new tar file
logger.info("Creating backup file %s..." % backup_filepath)
tf = tarfile.open(backup_filepath, 'w:bz2')
tf.add(tmpdir, arcname='./')
tf.close()
def create_log_file(targetdir, lp, backup_type, server, include_secrets,
extra_info=None):
# create a summary file about the backup, which will get included in the
# tar file. This makes it easy for users to see what the backup involved,
# without having to untar the DB and interrogate it
f = open(os.path.join(targetdir, "backup.txt"), 'w')
try:
time_str = datetime.datetime.now().strftime('%Y-%b-%d %H:%M:%S')
f.write("Backup created %s\n" % time_str)
f.write("Using samba-tool version: %s\n" % lp.get('server string'))
f.write("Domain %s backup, using DC '%s'\n" % (backup_type, server))
f.write("Backup for domain %s (NetBIOS), %s (DNS realm)\n" %
(lp.get('workgroup'), lp.get('realm').lower()))
f.write("Backup contains domain secrets: %s\n" % str(include_secrets))
if extra_info:
f.write("%s\n" % extra_info)
finally:
f.close()
# Add a backup-specific marker to the DB with info that we'll use during
# the restore process
def add_backup_marker(samdb, marker, value):
m = ldb.Message()
m.dn = ldb.Dn(samdb, "@SAMBA_DSDB")
m[marker] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, marker)
samdb.modify(m)
def check_targetdir(logger, targetdir):
if targetdir is None:
raise CommandError('Target directory required')
if not os.path.exists(targetdir):
logger.info('Creating targetdir %s...' % targetdir)
os.makedirs(targetdir)
elif not os.path.isdir(targetdir):
raise CommandError("%s is not a directory" % targetdir)
# For '--no-secrets' backups, this sets the Administrator user's password to a
# randomly-generated value. This is similar to the provision behaviour
def set_admin_password(logger, samdb):
"""Sets a randomly generated password for the backup DB's admin user"""
# match the admin user by RID
domainsid = samdb.get_domain_sid()
match_admin = "(objectsid=%s-%s)" % (domainsid,
security.DOMAIN_RID_ADMINISTRATOR)
search_expr = "(&(objectClass=user)%s)" % (match_admin,)
# retrieve the admin username (just in case it's been renamed)
res = samdb.search(base=samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_expr)
username = str(res[0]['samaccountname'])
adminpass = samba.generate_random_password(12, 32)
logger.info("Setting %s password in backup to: %s" % (username, adminpass))
logger.info("Run 'samba-tool user setpassword %s' after restoring DB" %
username)
samdb.setpassword(search_expr, adminpass, force_change_at_next_login=False,
username=username)
class cmd_domain_backup_online(samba.netcmd.Command):
'''Copy a running DC's current DB into a backup tar file.
Takes a backup copy of the current domain from a running DC. If the domain
were to undergo a catastrophic failure, then the backup file can be used to
recover the domain. The backup created is similar to the DB that a new DC
would receive when it joins the domain.
Note that:
- it's recommended to run 'samba-tool dbcheck' before taking a backup-file
and fix any errors it reports.
- all the domain's secrets are included in the backup file.
- although the DB contents can be untarred and examined manually, you need
to run 'samba-tool domain backup restore' before you can start a Samba DC
from the backup file.'''
synopsis = "%prog --server=<DC-to-backup> --targetdir=<output-dir>"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("--server", help="The DC to backup", type=str),
Option("--targetdir", type=str,
help="Directory to write the backup file to"),
Option("--no-secrets", action="store_true", default=False,
help="Exclude secret values from the backup created"),
Option("--backend-store", type="choice", metavar="BACKENDSTORE",
choices=["tdb", "mdb"],
help="Specify the database backend to be used "
"(default is %s)" % get_default_backend_store()),
]
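    # Example invocation (hypothetical server and target directory):
    #   samba-tool domain backup online --server=dc1.samdom.example.com \
    #       --targetdir=/var/backups/samba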
def run(self, sambaopts=None, credopts=None, server=None, targetdir=None,
no_secrets=False, backend_store=None):
logger = self.get_logger()
logger.setLevel(logging.DEBUG)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
# Make sure we have all the required args.
if server is None:
raise CommandError('Server required')
check_targetdir(logger, targetdir)
tmpdir = tempfile.mkdtemp(dir=targetdir)
# Run a clone join on the remote
include_secrets = not no_secrets
try:
ctx = join_clone(logger=logger, creds=creds, lp=lp,
include_secrets=include_secrets, server=server,
dns_backend='SAMBA_INTERNAL', targetdir=tmpdir,
backend_store=backend_store)
# get the paths used for the clone, then drop the old samdb connection
paths = ctx.paths
del ctx
# Get a free RID to use as the new DC's SID (when it gets restored)
remote_sam = SamDB(url='ldap://' + server, credentials=creds,
session_info=system_session(), lp=lp)
new_sid = get_sid_for_restore(remote_sam, logger)
realm = remote_sam.domain_dns_name()
# Grab the remote DC's sysvol files and bundle them into a tar file
logger.info("Backing up sysvol files (via SMB)...")
sysvol_tar = os.path.join(tmpdir, 'sysvol.tar.gz')
smb_conn = smb_sysvol_conn(server, lp, creds)
backup_online(smb_conn, sysvol_tar, remote_sam.get_domain_sid())
# remove the default sysvol files created by the clone (we want to
# make sure we restore the sysvol.tar.gz files instead)
shutil.rmtree(paths.sysvol)
# Edit the downloaded sam.ldb to mark it as a backup
samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp)
time_str = get_timestamp()
add_backup_marker(samdb, "backupDate", time_str)
add_backup_marker(samdb, "sidForRestore", new_sid)
add_backup_marker(samdb, "backupType", "online")
# ensure the admin user always has a password set (same as provision)
if no_secrets:
set_admin_password(logger, samdb)
# Add everything in the tmpdir to the backup tar file
backup_file = backup_filepath(targetdir, realm, time_str)
create_log_file(tmpdir, lp, "online", server, include_secrets)
create_backup_tar(logger, tmpdir, backup_file)
finally:
shutil.rmtree(tmpdir)
class cmd_domain_backup_restore(cmd_fsmo_seize):
'''Restore the domain's DB from a backup-file.
This restores a previously backed up copy of the domain's DB on a new DC.
Note that the restored DB will not contain the original DC that the backup
was taken from (or any other DCs in the original domain). Only the new DC
(specified by --newservername) will be present in the restored DB.
Samba can then be started against the restored DB. Any existing DCs for the
domain should be shutdown before the new DC is started. Other DCs can then
be joined to the new DC to recover the network.
Note that this command should be run as the root user - it will fail
otherwise.'''
synopsis = ("%prog --backup-file=<tar-file> --targetdir=<output-dir> "
"--newservername=<DC-name>")
takes_options = [
Option("--backup-file", help="Path to backup file", type=str),
Option("--targetdir", help="Path to write to", type=str),
Option("--newservername", help="Name for new server", type=str),
Option("--host-ip", type="string", metavar="IPADDRESS",
help="set IPv4 ipaddress"),
Option("--host-ip6", type="string", metavar="IP6ADDRESS",
help="set IPv6 ipaddress"),
Option("--site", help="Site to add the new server in", type=str),
]
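    # Example invocation (hypothetical file, directory and address values);
    # note it must be run as root:
    #   samba-tool domain backup restore --backup-file=samba-backup.tar.bz2 \
    #       --targetdir=/usr/local/samba-restored --newservername=DC2 \
    #       --host-ip=10.0.0.2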
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
}
def register_dns_zone(self, logger, samdb, lp, ntdsguid, host_ip,
host_ip6, site):
'''
Registers the new realm's DNS objects when a renamed domain backup
is restored.
'''
names = guess_names(lp)
domaindn = names.domaindn
forestdn = samdb.get_root_basedn().get_linearized()
dnsdomain = names.dnsdomain.lower()
dnsforest = dnsdomain
hostname = names.netbiosname.lower()
domainsid = dom_sid(samdb.get_domain_sid())
dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
domainguid = get_domainguid(samdb, domaindn)
# work out the IP address to use for the new DC's DNS records
host_ip = determine_host_ip(logger, lp, host_ip)
host_ip6 = determine_host_ip6(logger, lp, host_ip6)
if host_ip is None and host_ip6 is None:
raise CommandError('Please specify a host-ip for the new server')
logger.info("DNS realm was renamed to %s" % dnsdomain)
logger.info("Populating DNS partitions for new realm...")
# Add the DNS objects for the new realm (note: the backup clone already
# has the root server objects, so don't add them again)
fill_dns_data_partitions(samdb, domainsid, site, domaindn,
forestdn, dnsdomain, dnsforest, hostname,
host_ip, host_ip6, domainguid, ntdsguid,
dnsadmins_sid, add_root=False)
def fix_old_dc_references(self, samdb):
'''Fixes attributes that reference the old/removed DCs'''
# we just want to fix up DB problems here that were introduced by us
# removing the old DCs. We restrict what we fix up so that the restored
        # DB matches the backed-up DB as closely as possible. (There may be other
# DB issues inherited from the backed-up DC, but it's not our place to
# silently try to fix them here).
samdb.transaction_start()
chk = dbcheck(samdb, quiet=True, fix=True, yes=False,
in_transaction=True)
# fix up stale references to the old DC
setattr(chk, 'fix_all_old_dn_string_component_mismatch', 'ALL')
attrs = ['lastKnownParent', 'interSiteTopologyGenerator']
# fix-up stale one-way links that point to the old DC
setattr(chk, 'remove_plausible_deleted_DN_links', 'ALL')
attrs += ['msDS-NC-Replica-Locations']
cross_ncs_ctrl = 'search_options:1:2'
controls = ['show_deleted:1', cross_ncs_ctrl]
chk.check_database(controls=controls, attrs=attrs)
samdb.transaction_commit()
def create_default_site(self, samdb, logger):
'''Creates the default site, if it doesn't already exist'''
sitename = DEFAULTSITE
search_expr = "(&(cn={0})(objectclass=site))".format(sitename)
res = samdb.search(samdb.get_config_basedn(), scope=ldb.SCOPE_SUBTREE,
expression=search_expr)
if len(res) == 0:
logger.info("Creating default site '{0}'".format(sitename))
sites.create_site(samdb, samdb.get_config_basedn(), sitename)
return sitename
def remove_backup_markers(self, samdb):
"""Remove DB markers added by the backup process"""
# check what markers we need to remove (this may vary)
markers = ['sidForRestore', 'backupRename', 'backupDate', 'backupType']
res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
scope=ldb.SCOPE_BASE,
attrs=markers)
# remove any markers that exist in the DB
m = ldb.Message()
m.dn = ldb.Dn(samdb, "@SAMBA_DSDB")
for attr in markers:
if attr in res[0]:
m[attr] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attr)
samdb.modify(m)
def get_backup_type(self, samdb):
res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
scope=ldb.SCOPE_BASE,
attrs=['backupRename', 'backupType'])
# note that the backupType marker won't exist on backups created on
# v4.9. However, we can still infer the type, as only rename and
# online backups are supported on v4.9
if 'backupType' in res[0]:
backup_type = str(res[0]['backupType'])
elif 'backupRename' in res[0]:
backup_type = "rename"
else:
backup_type = "online"
return backup_type
def save_uptodate_vectors(self, samdb, partitions):
"""Ensures the UTDV used by DRS is correct after an offline backup"""
for nc in partitions:
# load the replUpToDateVector we *should* have
utdv = _dsdb_load_udv_v2(samdb, nc)
# convert it to NDR format and write it into the DB
utdv_blob = drsblobs.replUpToDateVectorBlob()
utdv_blob.version = 2
utdv_blob.ctr.cursors = utdv
utdv_blob.ctr.count = len(utdv)
new_value = ndr_pack(utdv_blob)
m = ldb.Message()
m.dn = ldb.Dn(samdb, nc)
m["replUpToDateVector"] = ldb.MessageElement(new_value,
ldb.FLAG_MOD_REPLACE,
"replUpToDateVector")
samdb.modify(m)
def run(self, sambaopts=None, credopts=None, backup_file=None,
targetdir=None, newservername=None, host_ip=None, host_ip6=None,
site=None):
if not (backup_file and os.path.exists(backup_file)):
raise CommandError('Backup file not found.')
if targetdir is None:
raise CommandError('Please specify a target directory')
# allow restoredc to install into a directory prepopulated by selftest
if (os.path.exists(targetdir) and os.listdir(targetdir) and
os.environ.get('SAMBA_SELFTEST') != '1'):
raise CommandError('Target directory is not empty')
if not newservername:
raise CommandError('Server name required')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
# ldapcmp prefers the server's netBIOS name in upper-case
newservername = newservername.upper()
# extract the backup .tar to a temp directory
targetdir = os.path.abspath(targetdir)
tf = tarfile.open(backup_file)
tf.extractall(targetdir)
tf.close()
# use the smb.conf that got backed up, by default (save what was
# actually backed up, before we mess with it)
smbconf = os.path.join(targetdir, 'etc', 'smb.conf')
shutil.copyfile(smbconf, smbconf + ".orig")
# if a smb.conf was specified on the cmd line, then use that instead
cli_smbconf = sambaopts.get_loadparm_path()
if cli_smbconf:
logger.info("Using %s as restored domain's smb.conf" % cli_smbconf)
shutil.copyfile(cli_smbconf, smbconf)
lp = samba.param.LoadParm()
lp.load(smbconf)
# open a DB connection to the restored DB
private_dir = os.path.join(targetdir, 'private')
samdb_path = os.path.join(private_dir, 'sam.ldb')
samdb = SamDB(url=samdb_path, session_info=system_session(), lp=lp)
backup_type = self.get_backup_type(samdb)
if site is None:
# There's no great way to work out the correct site to add the
# restored DC to. By default, add it to Default-First-Site-Name,
# creating the site if it doesn't already exist
site = self.create_default_site(samdb, logger)
logger.info("Adding new DC to site '{0}'".format(site))
# read the naming contexts out of the DB
res = samdb.search(base="", scope=ldb.SCOPE_BASE,
attrs=['namingContexts'])
ncs = [str(r) for r in res[0].get('namingContexts')]
# for offline backups we need to make sure the upToDateness info
# contains the invocation-ID and highest-USN of the DC we backed up.
# Otherwise replication propagation dampening won't correctly filter
# objects created by that DC
if backup_type == "offline":
self.save_uptodate_vectors(samdb, ncs)
# Create account using the join_add_objects function in the join object
# We need namingContexts, account control flags, and the sid saved by
# the backup process.
creds = credopts.get_credentials(lp)
ctx = DCJoinContext(logger, creds=creds, lp=lp, site=site,
forced_local_samdb=samdb,
netbios_name=newservername)
ctx.nc_list = ncs
ctx.full_nc_list = ncs
ctx.userAccountControl = (samba.dsdb.UF_SERVER_TRUST_ACCOUNT |
samba.dsdb.UF_TRUSTED_FOR_DELEGATION)
# rewrite the smb.conf to make sure it uses the new targetdir settings.
# (This doesn't update all filepaths in a customized config, but it
# corrects the same paths that get set by a new provision)
logger.info('Updating basic smb.conf settings...')
make_smbconf(smbconf, newservername, ctx.domain_name,
ctx.realm, targetdir, lp=lp,
serverrole="active directory domain controller")
# Get the SID saved by the backup process and create account
res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
scope=ldb.SCOPE_BASE,
attrs=['sidForRestore'])
sid = res[0].get('sidForRestore')[0]
logger.info('Creating account with SID: ' + str(sid))
ctx.join_add_objects(specified_sid=dom_sid(str(sid)))
m = ldb.Message()
m.dn = ldb.Dn(samdb, '@ROOTDSE')
ntds_guid = str(ctx.ntds_guid)
m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % ntds_guid,
ldb.FLAG_MOD_REPLACE,
"dsServiceName")
samdb.modify(m)
# if we renamed the backed-up domain, then we need to add the DNS
# objects for the new realm (we do this in the restore, now that we
# know the new DC's IP address)
if backup_type == "rename":
self.register_dns_zone(logger, samdb, lp, ctx.ntds_guid,
host_ip, host_ip6, site)
secrets_path = os.path.join(private_dir, 'secrets.ldb')
secrets_ldb = Ldb(secrets_path, session_info=system_session(), lp=lp)
secretsdb_self_join(secrets_ldb, domain=ctx.domain_name,
realm=ctx.realm, dnsdomain=ctx.dnsdomain,
netbiosname=ctx.myname, domainsid=ctx.domsid,
machinepass=ctx.acct_pass,
key_version_number=ctx.key_version_number,
secure_channel_type=misc.SEC_CHAN_BDC)
# Seize DNS roles
domain_dn = samdb.domain_dn()
forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name())
domaindns_dn = ("CN=Infrastructure,DC=DomainDnsZones,", domain_dn)
forestdns_dn = ("CN=Infrastructure,DC=ForestDnsZones,", forest_dn)
for dn_prefix, dns_dn in [forestdns_dn, domaindns_dn]:
if dns_dn not in ncs:
continue
full_dn = dn_prefix + dns_dn
m = ldb.Message()
m.dn = ldb.Dn(samdb, full_dn)
m["fSMORoleOwner"] = ldb.MessageElement(samdb.get_dsServiceName(),
ldb.FLAG_MOD_REPLACE,
"fSMORoleOwner")
samdb.modify(m)
# Seize other roles
for role in ['rid', 'pdc', 'naming', 'infrastructure', 'schema']:
self.seize_role(role, samdb, force=True)
# Get all DCs and remove them (this ensures these DCs cannot
# replicate because they will not have a password)
search_expr = "(&(objectClass=Server)(serverReference=*))"
res = samdb.search(samdb.get_config_basedn(), scope=ldb.SCOPE_SUBTREE,
expression=search_expr)
for m in res:
cn = str(m.get('cn')[0])
if cn != newservername:
remove_dc(samdb, logger, cn)
# Remove the repsFrom and repsTo from each NC to ensure we do
# not try (and fail) to talk to the old DCs
for nc in ncs:
msg = ldb.Message()
msg.dn = ldb.Dn(samdb, nc)
msg["repsFrom"] = ldb.MessageElement([],
ldb.FLAG_MOD_REPLACE,
"repsFrom")
msg["repsTo"] = ldb.MessageElement([],
ldb.FLAG_MOD_REPLACE,
"repsTo")
samdb.modify(msg)
# Update the krbtgt passwords twice, ensuring no tickets from
# the old domain are valid
update_krbtgt_account_password(samdb)
update_krbtgt_account_password(samdb)
# restore the sysvol directory from the backup tar file, including the
# original NTACLs. Note that the backup_restore() will fail if not root
sysvol_tar = os.path.join(targetdir, 'sysvol.tar.gz')
dest_sysvol_dir = lp.get('path', 'sysvol')
if not os.path.exists(dest_sysvol_dir):
os.makedirs(dest_sysvol_dir)
backup_restore(sysvol_tar, dest_sysvol_dir, samdb, smbconf)
os.remove(sysvol_tar)
# fix up any stale links to the old DCs we just removed
logger.info("Fixing up any remaining references to the old DCs...")
self.fix_old_dc_references(samdb)
# Remove DB markers added by the backup process
self.remove_backup_markers(samdb)
logger.info("Backup file successfully restored to %s" % targetdir)
logger.info("Please check the smb.conf settings are correct before "
"starting samba.")
class cmd_domain_backup_rename(samba.netcmd.Command):
'''Copy a running DC's DB to backup file, renaming the domain in the process.
Where <new-domain> is the new domain's NetBIOS name, and <new-dnsrealm> is
the new domain's realm in DNS form.
This is similar to 'samba-tool backup online' in that it clones the DB of a
running DC. However, this option also renames all the domain entries in the
DB. Renaming the domain makes it possible to restore and start a new Samba
DC without it interfering with the existing Samba domain. In other words,
you could use this option to clone your production samba domain and restore
it to a separate pre-production environment that won't overlap or interfere
with the existing production Samba domain.
Note that:
- it's recommended to run 'samba-tool dbcheck' before taking a backup-file
and fix any errors it reports.
- all the domain's secrets are included in the backup file.
- although the DB contents can be untarred and examined manually, you need
to run 'samba-tool domain backup restore' before you can start a Samba DC
from the backup file.
- GPO and sysvol information will still refer to the old realm and will
need to be updated manually.
- if you specify 'keep-dns-realm', then the DNS records will need updating
in order to work (they will still refer to the old DC's IP instead of the
new DC's address).
- we recommend that you only use this option if you know what you're doing.
'''
synopsis = ("%prog <new-domain> <new-dnsrealm> --server=<DC-to-backup> "
"--targetdir=<output-dir>")
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("--server", help="The DC to backup", type=str),
Option("--targetdir", help="Directory to write the backup file",
type=str),
Option("--keep-dns-realm", action="store_true", default=False,
help="Retain the DNS entries for the old realm in the backup"),
Option("--no-secrets", action="store_true", default=False,
help="Exclude secret values from the backup created"),
Option("--backend-store", type="choice", metavar="BACKENDSTORE",
choices=["tdb", "mdb"],
help="Specify the database backend to be used "
"(default is %s)" % get_default_backend_store()),
]
takes_args = ["new_domain_name", "new_dns_realm"]
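    # Example invocation (hypothetical domain names, server and directory):
    #   samba-tool domain backup rename NEWDOM newdom.example.com \
    #       --server=dc1.samdom.example.com --targetdir=/var/backups/samba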
def update_dns_root(self, logger, samdb, old_realm, delete_old_dns):
'''Updates dnsRoot for the partition objects to reflect the rename'''
# lookup the crossRef objects that hold the old realm's dnsRoot
partitions_dn = samdb.get_partitions_dn()
res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL,
attrs=["dnsRoot"],
expression='(&(objectClass=crossRef)(dnsRoot=*))')
new_realm = samdb.domain_dns_name()
# go through and add the new realm
for res_msg in res:
# dnsRoot can be multi-valued, so only look for the old realm
for dns_root in res_msg["dnsRoot"]:
dns_root = str(dns_root)
dn = res_msg.dn
if old_realm in dns_root:
new_dns_root = re.sub('%s$' % old_realm, new_realm,
dns_root)
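                    # e.g. (illustrative realms) a dnsRoot of 'samdom.example.com'
                    # becomes 'newdom.example.com'; '_msdcs.samdom.example.com'
                    # keeps its '_msdcs.' prefix because the regex is anchored
                    # to the end of the string.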
logger.info("Adding %s dnsRoot to %s" % (new_dns_root, dn))
m = ldb.Message()
m.dn = dn
m["dnsRoot"] = ldb.MessageElement(new_dns_root,
ldb.FLAG_MOD_ADD,
"dnsRoot")
samdb.modify(m)
# optionally remove the dnsRoot for the old realm
if delete_old_dns:
logger.info("Removing %s dnsRoot from %s" % (dns_root,
dn))
m["dnsRoot"] = ldb.MessageElement(dns_root,
ldb.FLAG_MOD_DELETE,
"dnsRoot")
samdb.modify(m)
# Updates the CN=<domain>,CN=Partitions,CN=Configuration,... object to
# reflect the domain rename
def rename_domain_partition(self, logger, samdb, new_netbios_name):
        '''Renames the domain partition object and updates its nETBIOSName'''
# lookup the crossRef object that holds the nETBIOSName (nCName has
# already been updated by this point, but the netBIOS hasn't)
base_dn = samdb.get_default_basedn()
nc_name = ldb.binary_encode(str(base_dn))
partitions_dn = samdb.get_partitions_dn()
res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL,
attrs=["nETBIOSName"],
expression='ncName=%s' % nc_name)
logger.info("Changing backup domain's NetBIOS name to %s" %
new_netbios_name)
m = ldb.Message()
m.dn = res[0].dn
m["nETBIOSName"] = ldb.MessageElement(new_netbios_name,
ldb.FLAG_MOD_REPLACE,
"nETBIOSName")
samdb.modify(m)
# renames the object itself to reflect the change in domain
new_dn = "CN=%s,%s" % (new_netbios_name, partitions_dn)
logger.info("Renaming %s --> %s" % (res[0].dn, new_dn))
samdb.rename(res[0].dn, new_dn, controls=['relax:0'])
def delete_old_dns_zones(self, logger, samdb, old_realm):
# remove the top-level DNS entries for the old realm
basedn = samdb.get_default_basedn()
dn = "DC=%s,CN=MicrosoftDNS,DC=DomainDnsZones,%s" % (old_realm, basedn)
logger.info("Deleting old DNS zone %s" % dn)
samdb.delete(dn, ["tree_delete:1"])
forestdn = samdb.get_root_basedn().get_linearized()
dn = "DC=_msdcs.%s,CN=MicrosoftDNS,DC=ForestDnsZones,%s" % (old_realm,
forestdn)
logger.info("Deleting old DNS zone %s" % dn)
samdb.delete(dn, ["tree_delete:1"])
def fix_old_dn_attributes(self, samdb):
'''Fixes attributes (i.e. objectCategory) that still use the old DN'''
samdb.transaction_start()
# Just fix any mismatches in DN detected (leave any other errors)
chk = dbcheck(samdb, quiet=True, fix=True, yes=False,
in_transaction=True)
# fix up incorrect objectCategory/etc attributes
setattr(chk, 'fix_all_old_dn_string_component_mismatch', 'ALL')
cross_ncs_ctrl = 'search_options:1:2'
controls = ['show_deleted:1', cross_ncs_ctrl]
chk.check_database(controls=controls)
samdb.transaction_commit()
def run(self, new_domain_name, new_dns_realm, sambaopts=None,
credopts=None, server=None, targetdir=None, keep_dns_realm=False,
no_secrets=False, backend_store=None):
logger = self.get_logger()
logger.setLevel(logging.INFO)
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
# Make sure we have all the required args.
if server is None:
raise CommandError('Server required')
check_targetdir(logger, targetdir)
delete_old_dns = not keep_dns_realm
new_dns_realm = new_dns_realm.lower()
new_domain_name = new_domain_name.upper()
new_base_dn = samba.dn_from_dns_name(new_dns_realm)
logger.info("New realm for backed up domain: %s" % new_dns_realm)
logger.info("New base DN for backed up domain: %s" % new_base_dn)
logger.info("New domain NetBIOS name: %s" % new_domain_name)
tmpdir = tempfile.mkdtemp(dir=targetdir)
# setup a join-context for cloning the remote server
include_secrets = not no_secrets
ctx = DCCloneAndRenameContext(new_base_dn, new_domain_name,
new_dns_realm, logger=logger,
creds=creds, lp=lp,
include_secrets=include_secrets,
dns_backend='SAMBA_INTERNAL',
server=server, targetdir=tmpdir,
backend_store=backend_store)
# sanity-check we're not "renaming" the domain to the same values
old_domain = ctx.domain_name
if old_domain == new_domain_name:
shutil.rmtree(tmpdir)
raise CommandError("Cannot use the current domain NetBIOS name.")
old_realm = ctx.realm
if old_realm == new_dns_realm:
shutil.rmtree(tmpdir)
raise CommandError("Cannot use the current domain DNS realm.")
# do the clone/rename
ctx.do_join()
# get the paths used for the clone, then drop the old samdb connection
del ctx.local_samdb
paths = ctx.paths
# get a free RID to use as the new DC's SID (when it gets restored)
remote_sam = SamDB(url='ldap://' + server, credentials=creds,
session_info=system_session(), lp=lp)
new_sid = get_sid_for_restore(remote_sam, logger)
# Grab the remote DC's sysvol files and bundle them into a tar file.
# Note we end up with 2 sysvol dirs - the original domain's files (that
        # use the old realm) backed up here, as well as default files generated
# for the new realm as part of the clone/join.
sysvol_tar = os.path.join(tmpdir, 'sysvol.tar.gz')
smb_conn = smb_sysvol_conn(server, lp, creds)
backup_online(smb_conn, sysvol_tar, remote_sam.get_domain_sid())
# connect to the local DB (making sure we use the new/renamed config)
lp.load(paths.smbconf)
samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp)
# Edit the cloned sam.ldb to mark it as a backup
time_str = get_timestamp()
add_backup_marker(samdb, "backupDate", time_str)
add_backup_marker(samdb, "sidForRestore", new_sid)
add_backup_marker(samdb, "backupRename", old_realm)
add_backup_marker(samdb, "backupType", "rename")
# fix up the DNS objects that are using the old dnsRoot value
self.update_dns_root(logger, samdb, old_realm, delete_old_dns)
# update the netBIOS name and the Partition object for the domain
self.rename_domain_partition(logger, samdb, new_domain_name)
if delete_old_dns:
self.delete_old_dns_zones(logger, samdb, old_realm)
logger.info("Fixing DN attributes after rename...")
self.fix_old_dn_attributes(samdb)
# ensure the admin user always has a password set (same as provision)
if no_secrets:
set_admin_password(logger, samdb)
# Add everything in the tmpdir to the backup tar file
backup_file = backup_filepath(targetdir, new_dns_realm, time_str)
create_log_file(tmpdir, lp, "rename", server, include_secrets,
"Original domain %s (NetBIOS), %s (DNS realm)" %
(old_domain, old_realm))
create_backup_tar(logger, tmpdir, backup_file)
shutil.rmtree(tmpdir)
class cmd_domain_backup_offline(samba.netcmd.Command):
'''Backup the local domain directories safely into a tar file.
Takes a backup copy of the current domain from the local files on disk,
with proper locking of the DB to ensure consistency. If the domain were to
undergo a catastrophic failure, then the backup file can be used to recover
the domain.
    An offline backup differs from an online backup in the following ways:
- a backup can be created even if the DC isn't currently running.
- includes non-replicated attributes that an online backup wouldn't store.
- takes a copy of the raw database files, which has the risk that any
hidden problems in the DB are preserved in the backup.'''
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
}
takes_options = [
Option("--targetdir",
help="Output directory (required)",
type=str),
]
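    # Example invocation (hypothetical target directory), run as root on the DC:
    #   samba-tool domain backup offline --targetdir=/var/backups/samba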
backup_ext = '.bak-offline'
def offline_tdb_copy(self, path):
backup_path = path + self.backup_ext
try:
tdb_copy(path, backup_path, readonly=True)
except CalledProcessError as copy_err:
# If the copy didn't work, check if it was caused by an EINVAL
# error on opening the DB. If so, it's a mutex locked database,
# which we can safely ignore.
try:
tdb.open(path)
except Exception as e:
if hasattr(e, 'errno') and e.errno == errno.EINVAL:
return
raise e
raise copy_err
if not os.path.exists(backup_path):
s = "tdbbackup said backup succeeded but {0} not found"
raise CommandError(s.format(backup_path))
def offline_mdb_copy(self, path):
mdb_copy(path, path + self.backup_ext)
# Secrets databases are a special case: a transaction must be started
# on the secrets.ldb file before backing up that file and secrets.tdb
def backup_secrets(self, private_dir, lp, logger):
secrets_path = os.path.join(private_dir, 'secrets')
secrets_obj = Ldb(secrets_path + '.ldb', lp=lp)
logger.info('Starting transaction on ' + secrets_path)
secrets_obj.transaction_start()
self.offline_tdb_copy(secrets_path + '.ldb')
self.offline_tdb_copy(secrets_path + '.tdb')
secrets_obj.transaction_cancel()
# sam.ldb must have a transaction started on it before backing up
# everything in sam.ldb.d with the appropriate backup function.
def backup_smb_dbs(self, private_dir, samdb, lp, logger):
# First, determine if DB backend is MDB. Assume not unless there is a
# 'backendStore' attribute on @PARTITION containing the text 'mdb'
store_label = "backendStore"
res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE,
attrs=[store_label])
mdb_backend = store_label in res[0] and str(res[0][store_label][0]) == 'mdb'
sam_ldb_path = os.path.join(private_dir, 'sam.ldb')
copy_function = None
if mdb_backend:
logger.info('MDB backend detected. Using mdb backup function.')
copy_function = self.offline_mdb_copy
else:
logger.info('Starting transaction on ' + sam_ldb_path)
copy_function = self.offline_tdb_copy
sam_obj = Ldb(sam_ldb_path, lp=lp)
sam_obj.transaction_start()
logger.info(' backing up ' + sam_ldb_path)
self.offline_tdb_copy(sam_ldb_path)
sam_ldb_d = sam_ldb_path + '.d'
for sam_file in os.listdir(sam_ldb_d):
sam_file = os.path.join(sam_ldb_d, sam_file)
if sam_file.endswith('.ldb'):
logger.info(' backing up locked/related file ' + sam_file)
copy_function(sam_file)
else:
logger.info(' copying locked/related file ' + sam_file)
shutil.copyfile(sam_file, sam_file + self.backup_ext)
if not mdb_backend:
sam_obj.transaction_cancel()
# Find where a path should go in the fixed backup archive structure.
def get_arc_path(self, path, conf_paths):
backup_dirs = {"private": conf_paths.private_dir,
"statedir": conf_paths.state_dir,
"etc": os.path.dirname(conf_paths.smbconf)}
matching_dirs = [(_, p) for (_, p) in backup_dirs.items() if
path.startswith(p)]
arc_path, fs_path = matching_dirs[0]
# If more than one directory is a parent of this path, then at least
# one configured path is a subdir of another. Use closest match.
if len(matching_dirs) > 1:
arc_path, fs_path = max(matching_dirs, key=lambda p: len(p[1]))
arc_path += path[len(fs_path):]
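        # Illustrative mapping (hypothetical paths): with private_dir set to
        # /usr/local/samba/private, the file /usr/local/samba/private/secrets.ldb
        # is archived as 'private/secrets.ldb'; files next to smb.conf land
        # under 'etc/'.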
return arc_path
def run(self, sambaopts=None, targetdir=None):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
# Get the absolute paths of all the directories we're going to backup
lp = sambaopts.get_loadparm()
paths = samba.provision.provision_paths_from_lp(lp, lp.get('realm'))
if not (paths.samdb and os.path.exists(paths.samdb)):
logger.error("No database found at {0}".format(paths.samdb))
raise CommandError('Please check you are root, and ' +
'are running this command on an AD DC')
check_targetdir(logger, targetdir)
samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp)
sid = get_sid_for_restore(samdb, logger)
backup_dirs = [paths.private_dir, paths.state_dir,
os.path.dirname(paths.smbconf)] # etc dir
logger.info('running backup on dirs: {0}'.format(' '.join(backup_dirs)))
# Recursively get all file paths in the backup directories
all_files = []
for backup_dir in backup_dirs:
for (working_dir, _, filenames) in os.walk(backup_dir):
if working_dir.startswith(paths.sysvol):
continue
if working_dir.endswith('.sock') or '.sock/' in working_dir:
continue
for filename in filenames:
if filename in all_files:
continue
# Assume existing backup files are from a previous backup.
# Delete and ignore.
if filename.endswith(self.backup_ext):
os.remove(os.path.join(working_dir, filename))
continue
# Sock files are autogenerated at runtime, ignore.
if filename.endswith('.sock'):
continue
all_files.append(os.path.join(working_dir, filename))
# Backup secrets, sam.ldb and their downstream files
self.backup_secrets(paths.private_dir, lp, logger)
self.backup_smb_dbs(paths.private_dir, samdb, lp, logger)
# Open the new backed up samdb, flag it as backed up, and write
# the next SID so the restore tool can add objects.
# WARNING: Don't change this code unless you know what you're doing.
# Writing to a .bak file only works because the DN being
# written to happens to be top level.
samdb = SamDB(url=paths.samdb + self.backup_ext,
session_info=system_session(), lp=lp)
time_str = get_timestamp()
add_backup_marker(samdb, "backupDate", time_str)
add_backup_marker(samdb, "sidForRestore", sid)
add_backup_marker(samdb, "backupType", "offline")
# Now handle all the LDB and TDB files that are not linked to
# anything else. Use transactions for LDBs.
for path in all_files:
if not os.path.exists(path + self.backup_ext):
if path.endswith('.ldb'):
logger.info('Starting transaction on solo db: ' + path)
ldb_obj = Ldb(path, lp=lp)
ldb_obj.transaction_start()
logger.info(' running tdbbackup on the same file')
self.offline_tdb_copy(path)
ldb_obj.transaction_cancel()
elif path.endswith('.tdb'):
logger.info('running tdbbackup on lone tdb file ' + path)
self.offline_tdb_copy(path)
# Now make the backup tar file and add all
# backed up files and any other files to it.
temp_tar_dir = tempfile.mkdtemp(dir=targetdir,
prefix='INCOMPLETEsambabackupfile')
temp_tar_name = os.path.join(temp_tar_dir, "samba-backup.tar.bz2")
tar = tarfile.open(temp_tar_name, 'w:bz2')
logger.info('running offline ntacl backup of sysvol')
sysvol_tar_fn = 'sysvol.tar.gz'
sysvol_tar = os.path.join(temp_tar_dir, sysvol_tar_fn)
backup_offline(paths.sysvol, sysvol_tar, samdb, paths.smbconf)
tar.add(sysvol_tar, sysvol_tar_fn)
os.remove(sysvol_tar)
create_log_file(temp_tar_dir, lp, "offline", "localhost", True)
backup_fn = os.path.join(temp_tar_dir, "backup.txt")
tar.add(backup_fn, os.path.basename(backup_fn))
os.remove(backup_fn)
logger.info('building backup tar')
for path in all_files:
arc_path = self.get_arc_path(path, paths)
if os.path.exists(path + self.backup_ext):
logger.info(' adding backup ' + arc_path + self.backup_ext +
' to tar and deleting file')
tar.add(path + self.backup_ext, arcname=arc_path)
os.remove(path + self.backup_ext)
elif path.endswith('.ldb') or path.endswith('.tdb'):
logger.info(' skipping ' + arc_path)
else:
logger.info(' adding misc file ' + arc_path)
tar.add(path, arcname=arc_path)
tar.close()
os.rename(temp_tar_name,
os.path.join(targetdir,
'samba-backup-{0}.tar.bz2'.format(time_str)))
os.rmdir(temp_tar_dir)
logger.info('Backup succeeded.')
class cmd_domain_backup(samba.netcmd.SuperCommand):
'''Create or restore a backup of the domain.'''
subcommands = {'offline': cmd_domain_backup_offline(),
'online': cmd_domain_backup_online(),
'rename': cmd_domain_backup_rename(),
'restore': cmd_domain_backup_restore()}
| kernevil/samba | python/samba/netcmd/domain_backup.py | Python | gpl-3.0 | 50,651 | 0.000138 |
# coding: utf-8
# -----------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# ==================================================
# IncidentDensityTestCase.py
# --------------------------------------------------
# requirements: ArcGIS X.X, Python 2.7 or Python 3.4
# author: ArcGIS Solutions
# company: Esri
# ==================================================
# history:
# 12/16/2015 - JH - initial creation
# ==================================================
import unittest
import arcpy
import os
import UnitTestUtilities
import Configuration
import DataDownload
class IncidentDensityTestCase(unittest.TestCase):
''' Test all tools and methods related to the Incident Density tool
in the Incident Analysis toolbox'''
inputPointFeatures = None
inputBoundaryFeatures = None
def setUp(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.setUp")
UnitTestUtilities.checkArcPy()
Configuration.incidentDataPath = DataDownload.runDataDownload(Configuration.patternsPaths, Configuration.incidentGDBName, Configuration.incidentURL)
if (Configuration.incidentScratchGDB == None) or (not arcpy.Exists(Configuration.incidentScratchGDB)):
Configuration.incidentScratchGDB = UnitTestUtilities.createScratch(Configuration.incidentDataPath)
Configuration.incidentInputGDB = os.path.join(Configuration.incidentDataPath, Configuration.incidentGDBName)
UnitTestUtilities.checkFilePaths([Configuration.incidentDataPath, Configuration.incidentInputGDB, Configuration.patterns_ProToolboxPath, Configuration.patterns_DesktopToolboxPath])
self.inputPointFeatures = os.path.join(Configuration.incidentInputGDB, "Incidents")
self.inputBoundaryFeatures = os.path.join(Configuration.incidentInputGDB, "Districts")
def tearDown(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.tearDown")
UnitTestUtilities.deleteScratch(Configuration.incidentScratchGDB)
def test_incident_density_pro(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.test_incident_density_pro")
arcpy.AddMessage("Testing Incident Density (Pro).")
self.test_incident_density(Configuration.patterns_ProToolboxPath)
def test_incident_density_desktop(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.test_incident_density_desktop")
arcpy.AddMessage("Testing Incident Density (Desktop).")
self.test_incident_density(Configuration.patterns_DesktopToolboxPath)
def test_incident_density(self, toolboxPath):
try:
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.test_incident_density")
arcpy.CheckOutExtension("Spatial")
arcpy.ImportToolbox(toolboxPath, "iaTools")
runToolMsg = "Running tool (Incident Density)"
arcpy.AddMessage(runToolMsg)
Configuration.Logger.info(runToolMsg)
outputDensity = os.path.join(Configuration.incidentScratchGDB, "outputDensity")
arcpy.IncidentDensity_iaTools(self.inputPointFeatures, self.inputBoundaryFeatures, outputDensity)
arcpy.CheckInExtension("Spatial")
self.assertTrue(arcpy.Exists(outputDensity))
except arcpy.ExecuteError:
UnitTestUtilities.handleArcPyError()
except:
UnitTestUtilities.handleGeneralError()
| jfrygeo/solutions-geoprocessing-toolbox | utils/test/patterns_tests/IncidentDensityTestCase.py | Python | apache-2.0 | 4,288 | 0.011194 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import unittest
import marshmallow
from airflow.api_connexion.schemas.connection_schema import (
ConnectionCollection,
connection_collection_item_schema,
connection_collection_schema,
connection_schema,
)
from airflow.models import Connection
from airflow.utils.session import create_session, provide_session
from tests.test_utils.db import clear_db_connections
class TestConnectionCollectionItemSchema(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Connection).delete()
def tearDown(self) -> None:
clear_db_connections()
@provide_session
def test_serialize(self, session):
connection_model = Connection(
conn_id='mysql_default',
conn_type='mysql',
host='mysql',
login='login',
schema='testschema',
port=80,
)
session.add(connection_model)
session.commit()
connection_model = session.query(Connection).first()
deserialized_connection = connection_collection_item_schema.dump(connection_model)
self.assertEqual(
deserialized_connection,
{
'connection_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
},
)
def test_deserialize(self):
connection_dump_1 = {
'connection_id': "mysql_default_1",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
}
connection_dump_2 = {
'connection_id': "mysql_default_2",
'conn_type': "postgres",
}
result_1 = connection_collection_item_schema.load(connection_dump_1)
result_2 = connection_collection_item_schema.load(connection_dump_2)
self.assertEqual(
result_1,
{
'conn_id': "mysql_default_1",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
},
)
self.assertEqual(
result_2,
{
'conn_id': "mysql_default_2",
'conn_type': "postgres",
},
)
def test_deserialize_required_fields(self):
connection_dump_1 = {
'connection_id': "mysql_default_2",
}
with self.assertRaisesRegex(
marshmallow.exceptions.ValidationError,
re.escape("{'conn_type': ['Missing data for required field.']}"),
):
connection_collection_item_schema.load(connection_dump_1)
class TestConnectionCollectionSchema(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Connection).delete()
def tearDown(self) -> None:
clear_db_connections()
@provide_session
def test_serialize(self, session):
connection_model_1 = Connection(conn_id='mysql_default_1', conn_type='test-type')
connection_model_2 = Connection(conn_id='mysql_default_2', conn_type='test-type2')
connections = [connection_model_1, connection_model_2]
session.add_all(connections)
session.commit()
instance = ConnectionCollection(connections=connections, total_entries=2)
deserialized_connections = connection_collection_schema.dump(instance)
self.assertEqual(
deserialized_connections,
{
'connections': [
{
"connection_id": "mysql_default_1",
"conn_type": "test-type",
"host": None,
"login": None,
'schema': None,
'port': None,
},
{
"connection_id": "mysql_default_2",
"conn_type": "test-type2",
"host": None,
"login": None,
'schema': None,
'port': None,
},
],
'total_entries': 2,
},
)
class TestConnectionSchema(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Connection).delete()
def tearDown(self) -> None:
clear_db_connections()
@provide_session
def test_serialize(self, session):
connection_model = Connection(
conn_id='mysql_default',
conn_type='mysql',
host='mysql',
login='login',
schema='testschema',
port=80,
password='test-password',
extra="{'key':'string'}",
)
session.add(connection_model)
session.commit()
connection_model = session.query(Connection).first()
deserialized_connection = connection_schema.dump(connection_model)
self.assertEqual(
deserialized_connection,
{
'connection_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'key':'string'}",
},
)
def test_deserialize(self):
den = {
'connection_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'key':'string'}",
}
result = connection_schema.load(den)
self.assertEqual(
result,
{
'conn_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'key':'string'}",
},
)
| DinoCow/airflow | tests/api_connexion/schemas/test_connection_schema.py | Python | apache-2.0 | 7,097 | 0.000564 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "2.0.0rc1"
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/version.py | Python | mit | 348 | 0 |
# Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
import wx.lib.newevent
from gettext import gettext as _
class FileChooser(wx.Panel):
FilePathChangedEvent, EVT_FILE_PATH_CHANGED = wx.lib.newevent.NewEvent()
BORDER = 1
def __init__(self, parent,
dialog_message=_("Choose file"),
dialog_dir="",
dialog_wildcard="*",
**kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self._dialog_message = dialog_message
self._dialog_dir = dialog_dir
self._dialog_wildcard = dialog_wildcard
self._create_gui()
def GetFilePath(self):
return self._path_text_field.GetValue()
def _create_gui(self):
self._create_path_text_field()
self._create_browse_button()
self._layout_components()
def _create_path_text_field(self):
self._path_text_field = wx.TextCtrl(self)
self._path_text_field.Bind(wx.EVT_TEXT, self._on_path_text_changed)
def _on_path_text_changed(self, evt):
wx.PostEvent(self, self.FilePathChangedEvent())
def _create_browse_button(self):
self._browse_button = wx.Button(self, wx.ID_OPEN)
self._browse_button.Bind(wx.EVT_BUTTON, self._on_browse_button_click)
def _on_browse_button_click(self, evt):
dialog = wx.FileDialog(self,
message=self._dialog_message,
defaultDir=self._dialog_dir,
wildcard=self._dialog_wildcard,
style=wx.FD_OPEN)
if dialog.ShowModal() == wx.ID_OK:
self._path_text_field.SetValue(dialog.GetPath())
dialog.Destroy()
def _layout_components(self):
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self._path_text_field,
proportion=1,
flag=wx.ALL|wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
sizer.Add(self._browse_button,
proportion=0,
flag=wx.ALL|wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
self.SetSizer(sizer)
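# Example usage (hypothetical parent frame and handler):
#   chooser = FileChooser(parent_frame, dialog_wildcard="*.timeline")
#   chooser.Bind(FileChooser.EVT_FILE_PATH_CHANGED, on_path_changed)
#   # where on_path_changed(event) can read chooser.GetFilePath()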
| ezequielpereira/Time-Line | timelinelib/wxgui/components/filechooser.py | Python | gpl-3.0 | 2,868 | 0.001046 |
# encoding: utf-8
from __future__ import unicode_literals
import operator
import pytest
from marrow.mongo import Filter
from marrow.schema.compat import odict, py3
@pytest.fixture
def empty_ops(request):
return Filter()
@pytest.fixture
def single_ops(request):
return Filter({'roll': 27})
def test_ops_iteration(single_ops):
assert list(iter(single_ops)) == ['roll']
class TestOpsMapping(object):
def test_getitem(self, empty_ops, single_ops):
with pytest.raises(KeyError):
empty_ops['roll']
assert single_ops['roll'] == 27
def test_setitem(self, empty_ops):
assert repr(empty_ops) == "Filter([])"
empty_ops['meaning'] = 42
if py3:
assert repr(empty_ops) == "Filter([('meaning', 42)])"
else:
assert repr(empty_ops) == "Filter([(u'meaning', 42)])"
def test_delitem(self, empty_ops, single_ops):
with pytest.raises(KeyError):
del empty_ops['roll']
if py3:
assert repr(single_ops) == "Filter([('roll', 27)])"
else:
assert repr(single_ops) == "Filter([(u'roll', 27)])"
del single_ops['roll']
assert repr(single_ops) == "Filter([])"
def test_length(self, empty_ops, single_ops):
assert len(empty_ops) == 0
assert len(single_ops) == 1
def test_keys(self, empty_ops, single_ops):
assert list(empty_ops.keys()) == []
assert list(single_ops.keys()) == ['roll']
def test_items(self, empty_ops, single_ops):
assert list(empty_ops.items()) == []
assert list(single_ops.items()) == [('roll', 27)]
def test_values(self, empty_ops, single_ops):
assert list(empty_ops.values()) == []
assert list(single_ops.values()) == [27]
def test_contains(self, single_ops):
assert 'foo' not in single_ops
assert 'roll' in single_ops
def test_equality_inequality(self, empty_ops, single_ops):
assert empty_ops == {}
assert empty_ops != {'roll': 27}
assert single_ops != {}
assert single_ops == {'roll': 27}
def test_get(self, single_ops):
assert single_ops.get('foo') is None
assert single_ops.get('foo', 42) == 42
assert single_ops.get('roll') == 27
def test_clear(self, single_ops):
assert len(single_ops.operations) == 1
single_ops.clear()
assert len(single_ops.operations) == 0
def test_pop(self, single_ops):
assert len(single_ops.operations) == 1
with pytest.raises(KeyError):
single_ops.pop('foo')
assert single_ops.pop('foo', 42) == 42
assert len(single_ops.operations) == 1
assert single_ops.pop('roll') == 27
assert len(single_ops.operations) == 0
def test_popitem(self, single_ops):
assert len(single_ops.operations) == 1
assert single_ops.popitem() == ('roll', 27)
assert len(single_ops.operations) == 0
with pytest.raises(KeyError):
single_ops.popitem()
def test_update(self, empty_ops, single_ops):
assert len(empty_ops.operations) == 0
empty_ops.update(name="Bob Dole")
assert len(empty_ops.operations) == 1
if py3:
assert repr(empty_ops) == "Filter([('name', 'Bob Dole')])"
else:
assert repr(empty_ops) == "Filter([('name', u'Bob Dole')])"
assert len(single_ops.operations) == 1
if py3:
assert repr(single_ops) == "Filter([('roll', 27)])"
else:
assert repr(single_ops) == "Filter([(u'roll', 27)])"
single_ops.update([('name', "Bob Dole")])
assert len(single_ops.operations) == 2
if py3:
assert repr(single_ops) in ("Filter([('roll', 27), ('name', 'Bob Dole')])", "Filter([('name', 'Bob Dole'), ('roll', 27)])")
else:
assert repr(single_ops) in ("Filter([(u'roll', 27), (u'name', u'Bob Dole')])", "Filter([(u'name', u'Bob Dole'), (u'roll', 27)])")
def test_setdefault(self, empty_ops):
assert len(empty_ops.operations) == 0
empty_ops.setdefault('fnord', 42)
assert len(empty_ops.operations) == 1
assert empty_ops.operations['fnord'] == 42
empty_ops.setdefault('fnord', 27)
assert len(empty_ops.operations) == 1
assert empty_ops.operations['fnord'] == 42
def test_ops_shallow_copy(self, single_ops):
assert single_ops.operations == single_ops.copy().operations
class TestOperationsCombination(object):
def test_operations_and_clean_merge(self):
comb = Filter({'roll': 27}) & Filter({'foo': 42})
assert comb.as_query == {'roll': 27, 'foo': 42}
def test_operations_and_operator_overlap(self):
comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': {'$lte': 42}})
assert comb.as_query == {'roll': {'$gte': 27, '$lte': 42}}
def test_paradoxical_condition(self):
comb = Filter({'roll': 27}) & Filter({'roll': {'$lte': 42}})
assert comb.as_query == {'roll': {'$eq': 27, '$lte': 42}}
comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': 42})
assert list(comb.as_query['roll'].items()) in ([('$gte', 27), ('$eq', 42)], [('$eq', 42), ('$gte', 27)])
def test_operations_or_clean_merge(self):
comb = Filter({'roll': 27}) | Filter({'foo': 42})
assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}]}
comb = comb | Filter({'bar': 'baz'})
assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}, {'bar': 'baz'}]}
def test_operations_hard_and(self):
comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'$and': [{'c': 3}]})
assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}, {'c': 3}]}
def test_operations_soft_and(self):
comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'c': 3})
assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}], 'c': 3}
| marrow/mongo | test/query/test_ops.py | Python | mit | 5,358 | 0.035274 |
import unittest
import os
from json_validator.validator import validate_params, ValidationError
arg1 = "something"
arg2 = "something_else"
schema_dirpath = os.path.dirname(os.path.realpath(__file__))
schema_filepath = os.path.join(schema_dirpath, "schema.json")
correct_params = {"param1": "some string",
"param2": ["string_in_array", "string_in_array2"]}
wrong_params = {"param1": ["string_in_array", "string_in_array2"],
"param2": "string"}
class JsonValidatorTest(unittest.TestCase):
def test_default_params_var_name(self):
@validate_params(schema_filename=schema_filepath)
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, correct_params),
"Returned by function"
)
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Wrong params!'}
)
def test_non_default_params_var_name(self):
@validate_params(schema_filename=schema_filepath,
params_variable="params_test")
def test_function(first_arg, second_arg, params_test):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, correct_params),
"Returned by function"
)
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Wrong params!'}
)
def test_debug(self):
@validate_params(schema_filename=schema_filepath,
debug=True)
def test_function(first_arg, second_arg, params):
return "Returned by function"
with self.assertRaises(ValidationError):
test_function(arg1, arg2, wrong_params)
def test_message(self):
@validate_params(schema_filename=schema_filepath,
message="Message test!")
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Message test!'}
)
def test_decorator_without_arguments(self):
@validate_params
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, correct_params),
"Returned by function"
)
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Wrong params!'}
)
def test_none_params(self):
@validate_params
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(test_function(arg1, arg2, params=None),
{'status': 'Wrong params!'})
def test_no_params_at_all(self):
@validate_params
def test_function(first_arg, second_arg):
return "Returned by function"
self.assertRaises(
Exception,
test_function, arg1, arg2
)
def test_get_params_from_args(self):
@validate_params
def test_function(params):
return "Returned by function"
self.assertEqual(
test_function(correct_params),
"Returned by function"
)
def test_get_params_from_kwargs(self):
@validate_params
def test_function(params):
return "Returned by function"
self.assertEqual(
test_function(params=correct_params),
"Returned by function"
)
def save_schema_to_json():
'''
Save some example schema to json file
'''
import json
schema = {
"required": [
"param1"
],
"type": "object",
"properties": {
"param1": {
"type": "string"
},
"param2": {
"type": "array"
}
}
}
with open("schema.json", "w") as jsonout:
json.dump(schema, jsonout, indent=4)
if __name__ == '__main__':
unittest.main()
| sliwinski-milosz/json_validator | tests/test_json_validator.py | Python | mit | 4,213 | 0 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.extract.scaling Contains the ScalingExtractor class, used for extracting scaling information
# from a simulation's log files.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.table import Table
# -----------------------------------------------------------------
class ScalingExtractor(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# -- Attributes --
# The parallelization mode
self.mode = None
# The number of processes and threads
self.processes = None
self.threads = None
# The path to the scaling file
self.scaling_file_path = None
# The scaling table
self.table = None
# The timeline and memory usage tables
self.timeline = None
self.memory = None
# -----------------------------------------------------------------
def run(self, simulation, timeline, memory):
"""
This function ...
:return:
:param simulation:
:param timeline:
:param memory:
"""
# Set the parallelization mode
self.mode = simulation.analysis.scaling_run_name.split("__")[4]
# Set the number of processes and threads
self.processes = simulation.processes()
self.threads = simulation.threads()
# Set the path to the scaling file
self.scaling_file_path = simulation.analysis.scaling_data_file
# Cache local references to the timeline and memory usage tables
self.timeline = timeline
self.memory = memory
        # Write the relevant data of the current simulation
self.write()
# Read in the extracted scaling table
self.read()
# Return the scaling table
return self.table
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Open the output file
resultfile = open(self.scaling_file_path, 'a')
# Add a line to the output file containing the runtimes for the current simulation
resultfile.write(self.mode + ' ' + str(self.processes) + ' ' + str(self.threads) + ' ' + str(self.timeline.setup)
+ ' ' + str(self.timeline.stellar) + ' ' + str(self.timeline.spectra) + ' ' + str(self.timeline.dust)
+ ' ' + str(self.timeline.writing) + ' ' + str(self.timeline.waiting) + ' ' + str(self.timeline.communication)
+ ' ' + str(self.timeline.total) + ' ' + str(self.memory.peak) + '\n')
# Close the output file
resultfile.close()
# -----------------------------------------------------------------
def read(self):
"""
This function ...
:return:
"""
# Read in the scaling data file
self.table = Table.read(self.scaling_file_path, format="ascii.ecsv")
# -----------------------------------------------------------------
| SKIRT/PTS | core/extract/scaling.py | Python | agpl-3.0 | 3,571 | 0.002241 |
"""star subcommand tests"""
# (c) 2015-2021 Wibowo Arindrarto <contact@arindrarto.dev>
import json
import pytest
from click.testing import CliRunner
from crimson.cli import main
from .utils import get_test_path
@pytest.fixture(scope="module")
def star_fail():
runner = CliRunner()
in_file = get_test_path("star_nope.txt")
result = runner.invoke(main, ["star", in_file])
return result
@pytest.fixture(scope="module")
def star_v230_01():
runner = CliRunner()
in_file = get_test_path("star_v230_01.txt")
result = runner.invoke(main, ["star", in_file])
result.json = json.loads(result.output)
return result
@pytest.fixture(scope="module")
def star_v230_02():
runner = CliRunner()
in_file = get_test_path("star_v230_02.txt")
result = runner.invoke(main, ["star", in_file])
result.json = json.loads(result.output)
return result
def test_star_fail_exit_code(star_fail):
assert star_fail.exit_code != 0
def test_star_fail_output(star_fail):
err_msg = "Unexpected file structure. No contents parsed."
assert err_msg in star_fail.output
@pytest.mark.parametrize(
"attr, exp",
[
("avgDeletionLength", 1.36),
("avgInputLength", 98),
("avgInsertionLength", 1.21),
("avgMappedLength", 98.27),
("mappingSpeed", 403.16),
("nInput", 14782416),
("nMappedMultipleLoci", 1936775),
("nMappedTooManyLoci", 27644),
("nSplicesATAC", 2471),
("nSplicesAnnotated", 3780876),
("nSplicesGCAG", 22344),
("nSplicesGTAG", 3780050),
("nSplicesNonCanonical", 5148),
("nSplicesTotal", 3810013),
("nUniquelyMapped", 12347431),
("pctMappedMultipleLoci", 13.1),
("pctMappedTooManyLoci", 0.19),
("pctUniquelyMapped", 83.53),
("pctUnmappedForOther", 0.03),
("pctUnmappedForTooManyMismatches", 0.0),
("pctUnmappedForTooShort", 3.16),
("rateDeletionPerBase", 0.0),
("rateInsertionPerBase", 0.0),
("rateMismatchPerBase", 0.24),
("timeEnd", "Dec 11 19:01:56"),
("timeJobStart", "Dec 11 18:55:02"),
("timeMappingStart", "Dec 11 18:59:44"),
],
)
def test_star_v230_01(star_v230_01, attr, exp):
assert star_v230_01.json.get(attr) == exp, attr
| bow/crimson | tests/test_star.py | Python | bsd-3-clause | 2,311 | 0 |
#!/usr/bin/python
#
# Retrieve information on an existing VPC.
#
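# Example task usage (illustrative only -- this module ships no DOCUMENTATION or
# EXAMPLES block, so the YAML below is an assumption based on the argument_spec;
# the region comes from the standard EC2 arguments or environment variables):
#
#   - name: look up the VPC tagged Name=staging
#     ec2_vpc_facts:
#       resource_tags:
#         Name: staging
#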
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
import boto.vpc
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource_tags=dict(type='dict', required=True)
))
module = AnsibleModule(argument_spec=argument_spec)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg="region must be specified")
try:
connection = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
vpcs = connection.get_all_vpcs()
vpcs_w_resources = filter(
lambda x: x.tags == module.params.get('resource_tags'), vpcs)
if len(vpcs_w_resources) != 1:
if len(vpcs_w_resources) == 0:
module.fail_json(msg="No vpc found")
else:
module.fail_json(msg="Multiple VPCs with specified resource_tags")
vpc = vpcs_w_resources[0]
subnets = connection.get_all_subnets(filters={'vpc_id': vpc.id})
def subnet_data(s):
d = s.__dict__
del d["connection"]
del d["region"]
return d
data = map(subnet_data, subnets)
facts = {
'ec2_vpc': {
'id': vpc.id,
'subnets': data
}
}
module.exit_json(changed=False, ansible_facts=facts)
main()
| rackn/container-networking-ansible | test/common/library/ec2_vpc_facts.py | Python | apache-2.0 | 1,587 | 0.00063 |
#If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
print sum([x for x in xrange(1,1000) if (x % 3 == 0) or (x % 5 == 0)])
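# An equivalent closed-form check (added for illustration) uses inclusion-exclusion:
# add the multiples of 3 and the multiples of 5, then subtract the multiples of 15,
# which were counted twice.
def sum_of_multiples_below(limit, k):
    m = (limit - 1) // k # how many positive multiples of k lie below limit
    return k * m * (m + 1) // 2
print sum_of_multiples_below(1000, 3) + sum_of_multiples_below(1000, 5) - sum_of_multiples_below(1000, 15)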
| ecolitan/projecteuler-answers | Multiples_of_3_and_5.py | Python | gpl-2.0 | 260 | 0.015385 |
import os
os.environ['KIVY_GL_BACKEND'] = 'gl' #need this to fix a kivy segfault that occurs with python3 for some reason
from kivy.app import App
class TestApp(App):
pass
if __name__ == '__main__':
TestApp().run()
| ISS-Mimic/Mimic | Pi/kivytest/Test_Kivy.py | Python | mit | 225 | 0.026667 |
# -*- coding: iso-8859-1 -*-
""" crypto.cipher.rijndael
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
        assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16, 20, 24, 28 or 32 bytes'
        assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16, 20, 24, 28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
        for round in range(1,self.Nr): #for round = 1 step 1 to Nr-1
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
    for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
This is the opposite operation of Mixcolumn """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
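#-------------------------------------
# Minimal usage sketch (not part of the original module): round-trip a single
# 16-byte block through encryptBlock/decryptBlock. The key and plaintext are
# arbitrary example values.
if __name__ == '__main__':
    cipher = Rijndael(key='k'*16, keySize=16, blockSize=16)
    cipherText = cipher.encryptBlock('p'*16)
    assert cipher.decryptBlock(cipherText) == 'p'*16
    print 'Rijndael 16-byte block round trip OK'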
| felipenaselva/felipe.repository | script.module.cryptopy/lib/crypto/cipher/rijndael.py | Python | gpl-2.0 | 14,723 | 0.051484 |
#!/usr/bin/env python3
# Copyright (C) British Crown (Met Office) & Contributors.
# -----------------------------------------------------------------------------
import metomi.rose.macro
class NullChecker(metomi.rose.macro.MacroBase):
"""Class to report errors for missing or null settings."""
REPORTS_INFO = [
(None, None, None, "Warning for null section, null option"),
("made", "up", None, "Warning for non-data & non-metadata setting"),
]
def validate(self, config, meta_config):
"""Validate meaningless settings."""
self.reports = []
for section, option, value, message in self.REPORTS_INFO:
self.add_report(section, option, value, message, is_warning=True)
return self.reports
| metomi/rose | demo/rose-config-edit/demo_meta/app/05-validate/meta/lib/python/macros/null.py | Python | gpl-3.0 | 765 | 0 |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='memd',
version='0.0.1',
url='https://github.com/gvnn/memd',
packages=['memd'],
install_requires=['python-memcached']
) | gvnn/memd | setup.py | Python | mit | 208 | 0.004808 |
"""fix invalid RAISE NOTICE in update_signatures_hourly.
Revision ID: 495bf3fcdb63
Revises: 3f007539efc
Create Date: 2014-07-07 20:33:34.634141
"""
# revision identifiers, used by Alembic.
revision = '495bf3fcdb63'
down_revision = '1baef149e5d1'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
load_stored_proc(op, ['update_signatures_hourly.sql'])
def downgrade():
load_stored_proc(op, ['update_signatures_hourly.sql'])
| AdrianGaudebert/socorro | alembic/versions/495bf3fcdb63_fix_invalid_notice_in_update_signatures_hourly.py | Python | mpl-2.0 | 692 | 0.011561 |
# -*- coding: utf8 -*-
'''
Python SSAP API
Version 1.5
© Indra Sistemas, S.A.
2014 SPAIN
All rights reserved
'''
import sys
def bytes2String(data):
'''
Converts a Python 3 bytes object to a string.
'''
if sys.version_info[0] < 3:
return data
else:
return data.decode("utf-8")
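# Illustrative round trip (not part of the original API): under Python 3,
# bytes2String(b"hola") returns the str "hola"; under Python 2 the argument is
# already a str and is returned unchanged.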
| Sofia2/python-api | src/ssap/utils/strings.py | Python | apache-2.0 | 334 | 0.015015 |
import time, types, os, sys, signal
import Queue # provides the Queue.Full/Queue.Empty exceptions caught below
import multiprocessing
"""
This class is simple enough to allow workers which take incomming log lines and do things with them.
I really dont know what people will want to do with there logs and how they will want to output them
but this is where they will be able to control the output system.
"""
workers = {}
def workerTarget(*args, **kw):
global workers
method_name = args[0]
return getattr(workers[args[0]], 'worker_loop')(*args[1:])
class workerBase(object):
def __init__(self, name="master", pool_size=0, queue_size=1024):
self.queue_size, self.pool_size, self.name = queue_size, pool_size, name
self.queue_active = True
self.workers_stopped = False
        signal.signal(signal.SIGINT, self.signal_handler) # shutdown handler
        signal.signal(signal.SIGTERM, self.signal_handler) # shutdown handler
        if self.pool_size == 0: # if no pool size is set, use the number of CPUs the system has
self.pool_size = multiprocessing.cpu_count()
global workers
workers[name] = self
def run(self):
if isinstance (self.worker_loop, types.MethodType):
args = list()
args.insert(0, self.name)
self.queue = multiprocessing.Queue(self.queue_size)
self.pool = multiprocessing.Pool(self.pool_size)
for x in range(0, self.pool_size): #avoid needing to run map but still get all the workers to start up
self.pool.apply_async(workerTarget, args)
return self
def queue_line(self, entry, metadata): #put the data in the queue
if self.queue_active == True:
try:
                self.queue.put([entry, metadata], False) # TODO: come up with a better method where the caller waits on a full queue and retries instead of dropping the entry
except Queue.Full, e:
print (str(e))
except Exception, e:
sys.stderr.write("queue_line: "+str(e)+"\n")
else:
return False
def worker_stop(self):
self.queue_active = False
self.stop_loop = True
if self.workers_stopped == False:
while self.stop_loop == True:
                if self.queue.empty() == False:
                    time.sleep(1)
                    sys.stderr.write("Waiting for queue: "+self.name+" to reach 0, currently at "+str(self.queue.qsize())+"\n")
else:
try:
self.queue.close() # close the queue now since its empty
except:
pass
sys.stderr.write("Giving the workers a little more time to finish there last task\n")
self.stop_loop = False
self.workers_stopped = False
time.sleep(2)
try:
sys.stderr.write("Closing pool\n")
self.pool.close()
sys.stderr.write("after pool close\n")
finally:
sys.stderr.write("")
exit()
sys.stderr.write("")
exit(False)
    def worker_loop(self): # to simplify things this is the loop that feeds data into the worker, so users only need to handle each entry
        while self.queue.empty() == False or self.workers_stopped == False:
try:
#sys.stderr.write("Queue size: "+str(self.queue.qsize())+" @ "+str(time.time())+"\n")
todo = self.queue.get()
#print sys.stderr.write("Queue object: "+str(todo)+"\n")
self.worker(todo[0], todo[1])
#time.sleep(1)
except Queue.Empty, e:
print (str(e))
time.sleep(1)
except Exception, e:
sys.stderr.write("worker_loop: "+str(e)+"\n")
exit()
return True
def worker(self, entry, metadata):
raise NotImplementedError( "Write a method that gets run as a callback once for every log entry worker(self, entry, metadata)" )
def signal_handler(self, signal, frame):
self.worker_stop()
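# Minimal subclass sketch (not part of the original module): the only method a
# concrete worker must provide is worker(entry, metadata); queueing, pooling and
# shutdown are inherited from workerBase. All names below are illustrative.
class printWorker(workerBase):
    def worker(self, entry, metadata):
        # write each queued log line to stdout together with its metadata
        sys.stdout.write(str(metadata)+" "+str(entry)+"\n")
# A caller would then do something like:
#     w = printWorker(name="printer", pool_size=2).run()
#     w.queue_line("log line", {"source": "example"})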
| wojons/transcribe | helpers/workerBase.py | Python | mit | 4,287 | 0.013529 |
__author__ = 'johan'
| joharei/QtChordii | utils/__init__.py | Python | gpl-3.0 | 21 | 0 |
"""Test code for upsampling"""
import numpy as np
import tvm
import topi
import topi.testing
import math
def verify_upsampling(batch, in_channel, in_height, in_width, scale, layout='NCHW', method="NEAREST_NEIGHBOR"):
if layout == 'NCHW':
A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')
dtype = A.dtype
out_shape = (batch, in_channel, in_height*scale, in_width*scale)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif layout == 'NHWC':
A = tvm.placeholder((batch, in_height, in_width, in_channel), name='A')
dtype = A.dtype
out_shape = (batch, in_height*scale, in_width*scale, in_channel)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
raise NotImplementedError(
'Layout not supported {} '.format(layout))
B = topi.nn.upsampling(A, scale, layout=layout, method=method)
if method == "BILINEAR":
out_size = (in_height*scale, in_width*scale)
b_np = topi.testing.bilinear_resize_python(a_np, out_size, layout)
else:
b_np = topi.testing.upsampling_python(a_np, scale, layout)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5)
for device in ['llvm', 'cuda', 'vulkan', 'nvptx']:
check_device(device)
def test_upsampling():
# NEAREST_NEIGHBOR - NCHW
verify_upsampling(8, 16, 32, 32, 2)
verify_upsampling(12, 32, 64, 64, 3)
# NEAREST_NEIGHBOR - NHWC
verify_upsampling(8, 16, 32, 32, 2, layout="NHWC")
verify_upsampling(12, 32, 64, 64, 3, layout="NHWC")
# BILINEAR - NCHW
verify_upsampling(2, 2, 32, 32, 2, method="BILINEAR")
verify_upsampling(2, 2, 32, 32, 3, method="BILINEAR")
# BILINEAR - NHWC
verify_upsampling(2, 2, 32, 32, 2, layout="NHWC", method="BILINEAR")
verify_upsampling(2, 2, 32, 32, 3, layout="NHWC", method="BILINEAR")
if __name__ == "__main__":
test_upsampling()
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/tests/python/test_topi_upsampling.py | Python | apache-2.0 | 2,484 | 0.002818 |
import sys
from math import *
if __name__ == '__main__':
try:
if sys.argv[1] == '-h':
print '''Cosmology calculator ala Ned Wright (www.astro.ucla.edu/~wright)
input values = redshift, Ho, Omega_m, Omega_vac
output values = age at z, distance in Mpc, kpc/arcsec, apparent to abs mag conversion
Options: -h for this message
-v for verbose response '''
sys.exit()
if sys.argv[1] == '-v':
verbose=1
length=len(sys.argv)-1
else:
verbose=0
length=len(sys.argv)
# if no values, assume Benchmark Model, input is z
if length == 2:
if float(sys.argv[1+verbose]) > 100:
z=float(sys.argv[1+verbose])/299790. # velocity to redshift
else:
z=float(sys.argv[1+verbose]) # redshift
H0 = 75 # Hubble constant
WM = 0.3 # Omega(matter)
WV = 1.0 - WM - 0.4165/(H0*H0) # Omega(vacuum) or lambda
# if one value, assume Benchmark Model with given Ho
elif length == 3:
z=float(sys.argv[1+verbose]) # redshift
H0 = float(sys.argv[2+verbose]) # Hubble constant
WM = 0.3 # Omega(matter)
WV = 1.0 - WM - 0.4165/(H0*H0) # Omega(vacuum) or lambda
# if Univ is Open, use Ho, Wm and set Wv to 0.
elif length == 4:
z=float(sys.argv[1+verbose]) # redshift
H0 = float(sys.argv[2+verbose]) # Hubble constant
WM = float(sys.argv[3+verbose]) # Omega(matter)
WV = 0.0 # Omega(vacuum) or lambda
# if Univ is General, use Ho, Wm and given Wv
elif length == 5:
z=float(sys.argv[1+verbose]) # redshift
H0 = float(sys.argv[2+verbose]) # Hubble constant
WM = float(sys.argv[3+verbose]) # Omega(matter)
WV = float(sys.argv[4+verbose]) # Omega(vacuum) or lambda
# or else fail
else:
print 'need some values or too many values'
sys.exit()
# initialize constants
WR = 0. # Omega(radiation)
WK = 0. # Omega curvaturve = 1-Omega(total)
c = 299792.458 # velocity of light in km/sec
        Tyr = 977.8 # coefficient for converting 1/H into Gyr
DTT = 0.5 # time from z to now in units of 1/H0
DTT_Gyr = 0.0 # value of DTT in Gyr
age = 0.5 # age of Universe in units of 1/H0
age_Gyr = 0.0 # value of age in Gyr
zage = 0.1 # age of Universe at redshift z in units of 1/H0
zage_Gyr = 0.0 # value of zage in Gyr
DCMR = 0.0 # comoving radial distance in units of c/H0
DCMR_Mpc = 0.0
DCMR_Gyr = 0.0
DA = 0.0 # angular size distance
DA_Mpc = 0.0
DA_Gyr = 0.0
kpc_DA = 0.0
DL = 0.0 # luminosity distance
DL_Mpc = 0.0
DL_Gyr = 0.0 # DL in units of billions of light years
V_Gpc = 0.0
a = 1.0 # 1/(1+z), the scale factor of the Universe
az = 0.5 # 1/(1+z(object))
h = H0/100.
WR = 4.165E-5/(h*h) # includes 3 massless neutrino species, T0 = 2.72528
WK = 1-WM-WR-WV
az = 1.0/(1+1.0*z)
age = 0.
n=1000 # number of points in integrals
for i in range(n):
a = az*(i+0.5)/n
adot = sqrt(WK+(WM/a)+(WR/(a*a))+(WV*a*a))
age = age + 1./adot
zage = az*age/n
zage_Gyr = (Tyr/H0)*zage
DTT = 0.0
DCMR = 0.0
# do integral over a=1/(1+z) from az to 1 in n steps, midpoint rule
for i in range(n):
a = az+(1-az)*(i+0.5)/n
adot = sqrt(WK+(WM/a)+(WR/(a*a))+(WV*a*a))
DTT = DTT + 1./adot
DCMR = DCMR + 1./(a*adot)
DTT = (1.-az)*DTT/n
DCMR = (1.-az)*DCMR/n
age = DTT+zage
age_Gyr = age*(Tyr/H0)
DTT_Gyr = (Tyr/H0)*DTT
DCMR_Gyr = (Tyr/H0)*DCMR
DCMR_Mpc = (c/H0)*DCMR
# tangential comoving distance
ratio = 1.00
x = sqrt(abs(WK))*DCMR
if x > 0.1:
if WK > 0:
ratio = 0.5*(exp(x)-exp(-x))/x
else:
ratio = sin(x)/x
else:
y = x*x
if WK < 0: y = -y
ratio = 1. + y/6. + y*y/120.
DCMT = ratio*DCMR
DA = az*DCMT
DA_Mpc = (c/H0)*DA
kpc_DA = DA_Mpc/206.264806
DA_Gyr = (Tyr/H0)*DA
DL = DA/(az*az)
DL_Mpc = (c/H0)*DL
DL_Gyr = (Tyr/H0)*DL
# comoving volume computation
ratio = 1.00
x = sqrt(abs(WK))*DCMR
if x > 0.1:
if WK > 0:
ratio = (0.125*(exp(2.*x)-exp(-2.*x))-x/2.)/(x*x*x/3.)
else:
ratio = (x/2. - sin(2.*x)/4.)/(x*x*x/3.)
else:
y = x*x
if WK < 0: y = -y
ratio = 1. + y/5. + (2./105.)*y*y
VCM = ratio*DCMR*DCMR*DCMR/3.
V_Gpc = 4.*pi*((0.001*c/H0)**3)*VCM
if verbose == 1:
print 'For H_o = ' + '%1.1f' % H0 + ', Omega_M = ' + '%1.2f' % WM + ', Omega_vac = ',
print '%1.2f' % WV + ', z = ' + '%1.3f' % z
print 'It is now ' + '%1.1f' % age_Gyr + ' Gyr since the Big Bang.'
print 'The age at redshift z was ' + '%1.1f' % zage_Gyr + ' Gyr.'
print 'The light travel time was ' + '%1.1f' % DTT_Gyr + ' Gyr.'
            print 'The comoving radial distance, which goes into Hubble\'s law, is',
print '%1.1f' % DCMR_Mpc + ' Mpc or ' + '%1.1f' % DCMR_Gyr + ' Gly.'
print 'The comoving volume within redshift z is ' + '%1.1f' % V_Gpc + ' Gpc^3.'
print 'The angular size distance D_A is ' + '%1.1f' % DA_Mpc + ' Mpc or',
print '%1.1f' % DA_Gyr + ' Gly.'
print 'This gives a scale of ' + '%.2f' % kpc_DA + ' kpc/".'
print 'The luminosity distance D_L is ' + '%1.1f' % DL_Mpc + ' Mpc or ' + '%1.1f' % DL_Gyr + ' Gly.'
print 'The distance modulus, m-M, is '+'%1.2f' % (5*log10(DL_Mpc*1e6)-5)
else:
print '%1.2f' % zage_Gyr,
print '%1.2f' % DCMR_Mpc,
print '%1.2f' % kpc_DA,
print '%1.2f' % (5*log10(DL_Mpc*1e6)-5)
except IndexError:
print 'need some values or too many values'
except ValueError:
print 'nonsense value or option'
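# Example invocation (illustrative): "python cc.py -v 0.1 70 0.3 0.7" prints the
# verbose report for z = 0.1, H0 = 70, Omega_m = 0.3 and Omega_vac = 0.7, while
# omitting -v prints only the age at z, comoving distance, kpc/arcsec scale and
# distance modulus on a single line.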
| sniemi/SamPy | cosmology/cc.py | Python | bsd-2-clause | 6,151 | 0.017396 |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2011 Governo do Estado do Rio Grande do Sul
#
# Author: Lincoln de Sousa <lincoln@gg.rs.gov.br>
# Author: Rodrigo Sebastiao da Rosa <rodrigo-rosa@procergs.rs.gov.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Web application definitions to the govp tool"""
from json import loads
from flask import Blueprint, render_template, redirect, current_app
from gd.utils.gdcache import cache, fromcache, tocache, removecache
from gd import auth
from gd.content.wp import wordpress #, gallery
from gd.utils import msg, format_csrf_error, dumps, twitts
from gd.govpergunta.forms import ContribForm
from gd.model import Contrib, session
# from gd.govpergunta.pairwise import Pairwise
THEMES = {'cuidado': u'Cuidado Integral',
'familia': u'Saúde da Família',
'emergencia': u'Urgência e Emergência',
'medicamentos': u'Acesso a Medicamentos',
'regional': u'Saúde na sua Região'}
govpergunta = Blueprint(
'govpergunta', __name__,
template_folder='templates',
static_folder='static')
# @govpergunta.route('/contribuir')
# def index():
# """Renders the index template"""
# form = ContribForm()
# return render_template('govpergunta.html', wp=wordpress, form=form)
# def _get_pairwise():
# """Helper function to get the pairwise instance saved in the
# session"""
# if ('pairwise' not in fsession) or \
# (fsession['version'] != PAIRWISE_VERSION):
# fsession['pairwise'] = Pairwise()
# fsession['version'] = PAIRWISE_VERSION
# return fsession['pairwise']
# @govpergunta.route('/')
# def index():
# pairwise = _get_pairwise()
# pair = pairwise.get_pair()
# fsession.modified = True
# return render_template(
# 'vote.html',
# pair=pair,
# theme=THEMES[pair['left'].theme]
# )
# @govpergunta.route('/invalidate')
# def invalidate():
# """With 50 votes, the user will be redirected to the index page and
# it's pairwise session will be destroied"""
# del fsession['pairwise']
# return redirect(url_for('index'))
# @govpergunta.route('/add_vote', methods=('POST',))
# def add_vote():
# if ('pairwise' not in fsession) or \
# (fsession['version'] != PAIRWISE_VERSION):
# return redirect(url_for('.index'))
# pairwise = fsession['pairwise']
# try:
# pairwise.vote(
# request.values.get('direction'),
# request.values.get('token'))
# fsession.modified = True
# except InvalidTokenError:
# pass
# return redirect(url_for('.index'))
@govpergunta.route('/')
def index():
return redirect('/govpergunta/resultados/')
# pagination, posts = wordpress.getPostsByTag(
# tag='governador-pergunta')
# images = gallery.search('GovernadorPergunta', limit=24)[::-1]
# videos = [wordpress.wpgd.getVideo(i) for i in (14, 16, 12)]
# return render_template(
# 'results.html', posts=posts, images=images, videos=videos)
@govpergunta.route('/resultados/')
@govpergunta.route('/resultados/<int:ano>/')
# @cache.memoize()
def resultados(ano=2012):
"""Renders a wordpress page special"""
cn = 'results-{0}'.format(ano)
slideshow = fromcache(cn) or tocache(cn,wordpress.getRecentPosts(
category_name='destaque-govpergunta-%s' % str(ano),
post_status='publish',
numberposts=4,
thumbsizes=['slideshow']))
categoria = 'resultados-gov-pergunta-%s' % str(ano)
retorno = fromcache("contribs-{0}".format(ano)) or \
tocache("contribs-{0}".format(ano) ,wordpress.wpgovp.getContribuicoes(principal='S',category=categoria))
menus = fromcache('menuprincipal') or tocache('menuprincipal', wordpress.exapi.getMenuItens(menu_slug='menu-principal') )
try:
twitter_hash_cabecalho = twitts()
except KeyError:
twitter_hash_cabecalho = ""
questions = None
for q in retorno:
if isinstance(q, list):
questions = q
return render_template(
'resultados.html',
menu=menus,
questions=questions,
sidebar=wordpress.getSidebar,
twitter_hash_cabecalho=twitter_hash_cabecalho,
ano=ano,
slideshow=slideshow,
wp=wordpress
)
@govpergunta.route('/resultados-detalhe/<int:postid>/')
# @cache.memoize()
def resultado_detalhe(postid):
"""Renders a contribution detail"""
principal = fromcache("res-detalhe-{0}".format(postid)) or \
tocache("res-detalhe-{0}".format(postid),wordpress.wpgovp.getContribuicoes(principal='S',postID=postid))
# print "PRINCIPAL +++++++++++++++++++++", principal[1][0]
retorno = fromcache("contribs-detalhe-{0}".format(postid)) or \
tocache("contribs-detalhe-{0}".format(postid),wordpress.wpgovp.getContribuicoes(principal='N',postID=postid))
# print "RETORNO +++++++++++++++++++++", retorno
comts = fromcache("com-res-detalhe-{0}".format(postid)) or \
tocache("com-res-detalhe-{0}".format(postid),wordpress.getComments(status='approve',post_id=postid))
qtd = retorno[0]
detalhes = retorno[1]
return render_template(
'resultados-detalhes.html',
agregadas=detalhes,
qtd_agregadas=qtd,
principal=principal[1][0],
comments=comts,
postid=postid
)
@govpergunta.route('/results/<path:path>')
# @cache.memoize()
def results_page(path):
page = fromcache("page-{0}".format(path)) or tocache("page-{0}".format(path),wordpress.getPageByPath(path))
return render_template('results_page.html', page=page)
@govpergunta.route('/contrib_json', methods=('POST',))
def contrib_json():
"""Receives a user contribution and saves to the database
    This function returns a JSON response with the result of the
    operation: either a success message, or an error if the submitted
    data is invalid or the user is not authenticated.
"""
if not auth.is_authenticated():
return msg.error(_(u'User not authenticated'))
raise Exception('Not funny')
form = ContribForm(csrf_enabled=False)
if form.validate_on_submit():
Contrib(
title=form.data['title'].encode('utf-8'),
content=form.data['content'].encode('utf-8'),
theme=form.data['theme'],
user=auth.authenticated_user())
session.commit()
# Returning the csrf
        data = { 'data': _('Contribution received successfully') }
data.update({ 'csrf': form.csrf.data })
return msg.ok(data)
else:
return format_csrf_error(form, form.errors, 'ValidationError')
# -- JSON API that publishes contributions
def _format_contrib(contrib):
"""Returns a dictionary representation of a contribution"""
return {
'id': contrib.id,
'title': contrib.title,
'content': contrib.content,
'creation_date': contrib.creation_date,
'theme': contrib.theme,
'moderation': contrib.moderation,
}
@govpergunta.route('/contribs/all.json')
# @cache.cached()
def contribs_all():
"""Lists all contributions in the JSON format"""
r = fromcache("contribs_all_") or tocache("contribs_all_",dumps([
_format_contrib(i)
for i in Contrib.query.filter_by(status=True)]))
return r
@govpergunta.route('/contribs/user.json')
# @cache.cached()
def contribs_user():
"""Lists all contributions in the JSON format"""
try:
user = auth.authenticated_user()
except auth.NobodyHome:
return dumps([])
return dumps([
_format_contrib(i)
for i in Contrib.query
.filter_by()
.filter(Contrib.user==user)])
@govpergunta.route('/contribs/choosen.json')
def contribs_choosen():
"""Lists all contributions in the JSON format"""
contribs = {}
for key in THEMES.keys():
contribs[key] = {'name': THEMES[key], 'children': []}
count = 11 if key == 'familia' else 10
for data in wordpress.pairwise.getSortedByScore(0, count, key)[0]:
cid = loads(data['data'])['id']
            # This is _nasty_. The team that cared about organizing
            # contributions approved something wrong. Yes, now we have
            # invalid data in our db. This was the best way I figured
            # out to fix it right now, but obviously something better
# must be done when we have more time.
if cid == 1213:
continue
contrib = Contrib.get(cid)
final = _format_contrib(contrib)
final['author'] = contrib.user.name
final['score'] = data['score']
final['votes'] = {
'score': data['score'],
'total': data['votes'],
'won': data['won'],
'lost': data['lost'],
}
final['children'] = []
for subcontrib in contrib.children:
subfinal = _format_contrib(subcontrib)
subfinal['author'] = subcontrib.user.name
final['children'].append(subfinal)
for subcontrib in Contrib.query.filter_by(parent=contrib.id):
subfinal = _format_contrib(subcontrib)
subfinal['author'] = subcontrib.user.name
final['children'].append(subfinal)
contribs[key]['children'].append(final)
return dumps(contribs)
# @govpergunta.route('/contribs/stats.json')
# def contribs_stats():
# """Lists all contributions in the JSON format"""
# def hammer_contrib(c):
# return '"%(name)s","%(email)s","%(city)s","%(phone)s",' + \
# '"%(title)s","%(theme)s"' % {
# 'theme': c.theme,
# 'name': c.user.name,
# 'email': c.user.email,
# 'city': c.user.get_meta('city'),
# 'phone': c.user.get_meta('phone'),
# 'title': c.title,
# }
#
# contribs = ["nome,email,cidade,telefone,titulo,tema"]
# for key in THEMES.keys():
# for data in wordpress.pairwise.getSortedByScore(0, 10, key)[0]:
# contrib = Contrib.get(loads(data['data'])['id'])
# contribs.append(hammer_contrib(contrib))
# for subcontrib in contrib.children:
# contribs.append(hammer_contrib(subcontrib))
# for subcontrib in Contrib.query.filter_by(parent=contrib.id):
# contribs.append(hammer_contrib(subcontrib))
# return '\n'.join(contribs)
| gabinetedigital/gd | gd/govpergunta/__init__.py | Python | agpl-3.0 | 11,253 | 0.00329 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Taco Module for Python documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 17 16:17:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../lib'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Taco Module for Python'
copyright = '2014, Graham Bell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TacoModuleforPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TacoModuleforPython.tex', 'Taco Module for Python Documentation',
'Graham Bell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tacomoduleforpython', 'Taco Module for Python Documentation',
['Graham Bell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TacoModuleforPython', 'Taco Module for Python Documentation',
'Graham Bell', 'TacoModuleforPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| grahambell/taco-python | doc/conf.py | Python | gpl-3.0 | 8,346 | 0.005991 |
import pyemto
import numpy as np
import os
latpath = "../../../../" # Path do bmdl, kstr and shape directories
# each system need to have same number of alloy elements
#systems = [['Fe','Al'],['Fe','Cr']]
#systems = [['Fe'],['Al']]
systems = [['Al']]
#concentrations = [[0.5,0.5]]
concentrations = [[1.0]]
magn = "NM" # Possible NM (Non-magnetic), FM (ferromagnetic) and
# DLM (Disordered local moments)
initial_sws = 3.0
# Check that initial_sws is in the correct format
if type(initial_sws) is float:
initial_sws = [initial_sws for x in range(3)]
elif type(initial_sws) is list:
pass
else:
print("ERROR: Initialsws should be float or list of 3 floats")
exit()
if not len(initial_sws) == 3:
print("ERROR: intialsws shoubd be a float or list of 3 floats!")
exit()
# Sanity checks
for s in systems:
if not len(s) == len(systems[0]):
print("Each system need to have same number of alloy elements!")
exit()
for c in concentrations:
if not len(c) == len(systems[0]):
print("Each given concetrations must have same number number as elements in system!")
exit()
# Next check magnetic states of system and initialize splits
splits = []
if magn == "FM":
afm = "F"
for s in systems:
splt = []
for atom in s:
if atom == "Fe":
splt.append(2.0)
else:
splt.append(0.5)
splits.append(splt)
elif magn == "DLM":
afm = "F"
    # First duplicate each atom and concentration
newsystems = []
newconcs = []
for i in range(len(systems)):
news = []
newc = []
splt = []
for j in range(len(systems[i])):
news.append(systems[i][j])
news.append(systems[i][j])
if systems[i][j] == "Fe":
splt.append( 2.0)
splt.append(-2.0)
else:
splt.append( 0.5)
splt.append(-0.5)
splits.append(splt)
newsystems.append(news)
systems = newsystems
for c in concentrations:
newc = []
for conc in c:
newc.append(conc)
newc.append(conc)
newconcs.append(newc)
concentrations = newconcs
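    # Hedged illustration (assumed inputs, not part of the original script):
    # with systems = [['Fe', 'Cr']] and concentrations = [[0.5, 0.5]], the DLM
    # branch above leaves
    #   systems        -> [['Fe', 'Fe', 'Cr', 'Cr']]
    #   splits         -> [[2.0, -2.0, 0.5, -0.5]]
    #   concentrations -> [[0.5, 0.5, 0.5, 0.5]]
    # i.e. every atom is duplicated with spin splits of opposite sign and its
    # concentration repeated, which is how the disordered-local-moment state
    # is represented here.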
elif magn == "NM":
afm = "P"
for s in systems:
splt = []
for atom in s:
splt.append(0.0)
splits.append(splt)
else:
print("Wrong magnetic state is given: " + magn)
print("Should be one of NM, FM or DLM!")
exit()
results = []
#We are ready to make inputs
for si in range(len(systems)):
s = systems[si]
split = splits[si]
# Create main directory
sname = ""
if magn == "DLM":
nlist = [s[i] for i in range(0,len(s),2)]
else:
nlist = s
for atom in nlist:
sname = sname + atom
#
# Make directories
if not os.path.lexists(sname):
os.makedirs(sname)
for c in concentrations:
sc_res = []
# Make subdirectory for concentration
cname = ""
count = 0
if magn == "DLM":
clist = [c[i] for i in range(0,len(c),2)]
else:
clist = c
for conc in clist:
count += 1
cname = cname +str(int(conc*1000)).zfill(4)
if not count == len(clist):
cname = cname+"-"
apath = os.path.join(sname,cname)
if not os.path.lexists(apath):
os.makedirs(apath)
# Make subdirectory for magnetic state
apath = os.path.join(apath,magn)
if not os.path.lexists(apath):
os.makedirs(apath)
# Construct base jobname
jobname = ""
for i in range(len(nlist)):
if jobname == "":
pass
else:
jobname = jobname + "_"
jobname = jobname + nlist[i].lower() + "%4.2f" % (clist[i])
finalname = jobname + "_final"
# BCC first
alloy = pyemto.System(folder=apath)
initialsws = initial_sws[0] # We need some clever way to get this
alloy.bulk(lat='bcc', jobname=jobname+"_bcc",atoms=s,concs=c,
latpath=latpath,sws=initialsws, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
#alloy.lattice_constants_batch_generate(sws=swsrange)
sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
sc_res.append([e0,B0,sws0])
alloy.bulk(lat='bcc',
jobname=finalname+"_bcc",
latpath=latpath,
sws=sws0,
atoms = s,
concs = c,
splts = split,
afm = afm,
amix=0.02,
efmix=0.9,
expan='M',
sofc='Y',
xc='PBE',
nky=21)
alloy.write_inputs()
# FCC second
alloy = pyemto.System(folder=apath)
initialsws = initial_sws[1] # We need some clever way to get this
alloy.bulk(lat='fcc', jobname=jobname+"_fcc",atoms=s,concs=c,
latpath=latpath,sws=initialsws, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
sc_res.append([e0,B0,sws0])
alloy.bulk(lat='fcc',
jobname=finalname+"_fcc",
latpath=latpath,
sws=sws0,
atoms = s,
concs = c,
splts = split,
afm = afm,
amix=0.02,
efmix=0.9,
expan='M',
sofc='Y',
xc='PBE',
nky=21)
alloy.write_inputs()
# HCP last
alloy = pyemto.System(folder=apath)
initialsws = initial_sws[2] # We need some clever way to get this
alloy.bulk(lat='hcp',jobname=jobname,latpath=latpath,
sws=initialsws, atoms = s,concs = c, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
#alloy.lattice_constants_batch_generate(sws=swsrange)
sws0, c_over_a0, B0, e0, R0, cs0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
alloy.sws = sws0
ca = round(c_over_a0,3)
sc_res.append([e0,B0,sws0,c_over_a0])
        # Check if the bmdl, kstr and shape files exist with the correct c over a
        hcpname = "hcp_" + str(ca)  # Structure name
strucpath = "../"
# Check if input files are in place
if os.path.exists(os.path.join(strucpath,hcpname+".bmdl")):
pass
else:
print("Making structures")
# make input files
alloy.lattice.set_values(jobname=hcpname,latpath="",
lat='hcp',kappaw=[0.0,-20.0],msgl=0,ca=ca,
dmax=2.2)
alloy.lattice.bmdl.write_input_file(folder=strucpath)
alloy.lattice.kstr.write_input_file(folder=strucpath)
alloy.lattice.shape.write_input_file(folder=strucpath)
alloy.lattice.batch.write_input_file(folder=strucpath)
# Make kfcd and kgrn input files
alloy.bulk(lat='hcp',
jobname=finalname+"_hcp",
latpath=latpath,
latname=hcpname,
sws=sws0,
ca= ca,
atoms = s,
concs = c,
splts = split,
afm = afm,
amix=0.02,
efmix=0.9,
expan='M',
sofc='Y',
xc='PBE',
nky=21,
nkz=17)
alloy.write_inputs()
results.append([[s,c],sc_res])
print("Results obtained:")
for r in results:
# Generate system name
sname = ""
for i in range(len(r[0][0])):
sname=sname+r[0][0][i]+str(r[0][1][i])
output = "System: "+sname+"\n"
output = output + " Magn: " +magn+"\n"
bcc = r[1][0]
output = output+"# Strc. E sws B (c/a)\n"
output = output+" bcc: %f %f %f\n" %(bcc[0],bcc[1],bcc[2])
fcc = r[1][1]
output = output + " fcc: %f %f %f\n" %(fcc[0],fcc[1],fcc[2])
hcp = r[1][2]
output = output +" hpc: %f %f %f %f\n" %(hcp[0],hcp[1],hcp[2],hcp[3])
print(output)
| hpleva/pyemto | pyemto/examples/alloy_discovery/make_alloy_final.py | Python | mit | 8,645 | 0.014112 |
import pandas as pd
from syscore.algos import calculate_weighted_average_with_nans
from syscore.genutils import str2Bool
from syscore.dateutils import ROOT_BDAYS_INYEAR
from syscore.pdutils import turnover
from sysquant.estimators.turnover import turnoverDataForTradingRule
from systems.system_cache import diagnostic, input
from systems.accounts.account_inputs import accountInputs
class accountCosts(accountInputs):
@diagnostic()
def get_SR_cost_for_instrument_forecast(
self, instrument_code: str, rule_variation_name: str
) -> float:
"""
Get the SR cost for a forecast/rule combination
:param instrument_code: instrument to get values for
:type instrument_code: str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
KEY OUTPUT
"""
transaction_cost = self.get_SR_transaction_cost_for_instrument_forecast(
instrument_code = instrument_code,
rule_variation_name = rule_variation_name
)
holding_cost = self.get_SR_holding_cost_only(instrument_code)
return transaction_cost + holding_cost
@diagnostic()
def get_SR_transaction_cost_for_instrument_forecast(
self, instrument_code: str, rule_variation_name: str
) -> float:
"""
Get the SR cost for a forecast/rule combination
:param instrument_code: instrument to get values for
:type instrument_code: str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
KEY OUTPUT
"""
use_pooled_costs = str2Bool(
self.config.forecast_cost_estimates["use_pooled_costs"]
)
if use_pooled_costs:
SR_cost = self._get_SR_cost_for_rule_with_pooled_costs(
instrument_code, rule_variation_name
)
else:
SR_cost = self._get_SR_cost_of_rule_for_individual_instrument(
instrument_code, rule_variation_name
)
return SR_cost
@input
def _get_SR_cost_for_rule_with_pooled_costs(
self, instrument_code: str, rule_variation_name: str
) -> float:
instrument_code_list = self.has_same_rules_as_code(instrument_code)
SR_cost = self._get_SR_cost_instr_forecast_for_list(
instrument_code_list, rule_variation_name
)
return SR_cost
@diagnostic()
def _get_SR_cost_instr_forecast_for_list(
self, instrument_code_list: list, rule_variation_name: str
) -> float:
"""
Get the SR cost for a forecast/rule combination, averaged across multiple instruments
:param instrument_code_list: instrument to get values for
:type instrument_code: str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
"""
list_of_SR_cost = [
self._get_SR_cost_of_rule_for_individual_instrument(
instrument_code, rule_variation_name
)
for instrument_code in instrument_code_list
]
# weight by length
cost_weightings = self._get_forecast_length_weighting_for_list_of_instruments(
instrument_code_list, rule_variation_name
)
weighted_SR_costs = [
SR_cost * weight
for SR_cost, weight in zip(list_of_SR_cost, cost_weightings)
]
avg_SR_cost = sum(weighted_SR_costs)
return avg_SR_cost
@diagnostic()
def _get_forecast_length_weighting_for_list_of_instruments(
self, instrument_code_list: list, rule_variation_name: str
) -> list:
forecast_lengths = [
self._get_forecast_length_for_instrument_rule(
instrument_code, rule_variation_name
)
for instrument_code in instrument_code_list
]
total_length = float(sum(forecast_lengths))
weights = [
forecast_length / total_length for forecast_length in forecast_lengths
]
return weights
@diagnostic()
def _get_forecast_length_for_instrument_rule(
self, instrument_code: str, rule_variation_name: str
) -> int:
forecast = self.get_capped_forecast(instrument_code, rule_variation_name)
return len(forecast)
@diagnostic()
def _get_SR_cost_of_rule_for_individual_instrument(
self, instrument_code: str, rule_variation_name: str
) -> float:
# note the turnover may still be pooled..
turnover = self.forecast_turnover(instrument_code, rule_variation_name)
SR_cost = self.get_SR_cost_given_turnover(instrument_code, turnover)
return SR_cost
@diagnostic()
def get_SR_cost_given_turnover(
self, instrument_code: str, turnover: float
) -> float:
SR_cost_trading = self.get_SR_trading_cost_only_given_turnover(
instrument_code, turnover
)
SR_cost_holding = self.get_SR_holding_cost_only(instrument_code)
SR_cost = SR_cost_holding + SR_cost_trading
return SR_cost
def get_SR_trading_cost_only_given_turnover(
self, instrument_code: str, turnover: float
) -> float:
cost_per_trade = self.get_SR_cost_per_trade_for_instrument(instrument_code)
SR_cost_trading = turnover * cost_per_trade
return SR_cost_trading
def get_SR_holding_cost_only(self, instrument_code: str) -> float:
cost_per_trade = self.get_SR_cost_per_trade_for_instrument(instrument_code)
hold_turnovers = self.get_rolls_per_year(instrument_code) / 2.0
SR_cost_holding = hold_turnovers * cost_per_trade
return SR_cost_holding
@diagnostic()
def get_turnover_for_forecast_combination(
self, codes_to_use: list, rule_variation_name: str
) -> turnoverDataForTradingRule:
turnover_as_list = self._forecast_turnover_for_list_by_instrument(
codes_to_use, rule_variation_name=rule_variation_name
)
turnover_as_dict = dict(
[
(instrument_code, turnover)
for (instrument_code, turnover) in zip(codes_to_use, turnover_as_list)
]
)
turnover_data_for_trading_rule = turnoverDataForTradingRule(turnover_as_dict)
return turnover_data_for_trading_rule
@diagnostic()
def forecast_turnover(
self, instrument_code: str, rule_variation_name: str
) -> float:
use_pooled_turnover = str2Bool(
self.config.forecast_cost_estimates["use_pooled_turnover"]
)
if use_pooled_turnover:
turnover = self._forecast_turnover_pooled(
instrument_code, rule_variation_name
)
else:
turnover = self._forecast_turnover_for_individual_instrument(
instrument_code, rule_variation_name
)
return turnover
@diagnostic()
def _forecast_turnover_pooled(
self, instrument_code: str, rule_variation_name: str
) -> float:
instrument_code_list = self.has_same_rules_as_code(instrument_code)
turnover_for_SR = self._forecast_turnover_for_list(
instrument_code_list, rule_variation_name=rule_variation_name
)
return turnover_for_SR
@diagnostic()
def _forecast_turnover_for_list(
self, instrument_code_list: list, rule_variation_name: str
) -> float:
"""
Get the average turnover for a rule, over instrument_code_list
:param instrument_code_list: instruments to get values for
:type instrument_code_list: list of str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
"""
turnovers = self._forecast_turnover_for_list_by_instrument(
codes_to_use=instrument_code_list, rule_variation_name=rule_variation_name
)
# weight by length
weights = self._get_forecast_length_weighting_for_list_of_instruments(
instrument_code_list, rule_variation_name
)
avg_turnover = calculate_weighted_average_with_nans(weights, turnovers)
return avg_turnover
@diagnostic()
def _forecast_turnover_for_list_by_instrument(
self, codes_to_use: list, rule_variation_name: str
) -> list:
turnovers = [
self._forecast_turnover_for_individual_instrument(
instrument_code, rule_variation_name
)
for instrument_code in codes_to_use
]
return turnovers
@diagnostic()
def _forecast_turnover_for_individual_instrument(
self, instrument_code: str, rule_variation_name: str
) -> float:
forecast = self.get_capped_forecast(instrument_code, rule_variation_name)
average_forecast_for_turnover = self.average_forecast()
annual_turnover_for_forecast = turnover(forecast, average_forecast_for_turnover)
return annual_turnover_for_forecast
@diagnostic()
def get_SR_cost_per_trade_for_instrument(self, instrument_code: str) -> float:
"""
Get the vol normalised SR costs for an instrument
:param instrument_code: instrument to value for
:type instrument_code: str
:returns: float
>>> from systems.basesystem import System
>>> from systems.tests.testdata import get_test_object_futures_with_portfolios
>>> (portfolio, posobject, combobject, capobject, rules, rawdata, data, config)=get_test_object_futures_with_portfolios()
>>> system=System([portfolio, posobject, combobject, capobject, rules, rawdata, Account()], data, config)
>>>
>>> system.accounts.get_SR_cost_per_trade_for_instrument("EDOLLAR")
0.0065584086244069775
"""
cost_in_percentage_terms = (
self._get_SR_cost_per_trade_for_instrument_percentage(instrument_code)
)
avg_annual_vol_perc = self._recent_average_annual_perc_vol(instrument_code)
# cost per round trip
SR_cost = 2.0 * cost_in_percentage_terms / avg_annual_vol_perc
return SR_cost
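        # Hedged worked example with invented numbers (not data from this
        # codebase): a one-way cost of 0.05% of price (0.0005) and a recent
        # annual percentage volatility of 16% (0.16) give
        #     SR_cost = 2.0 * 0.0005 / 0.16 = 0.00625
        # i.e. roughly 0.006 Sharpe-ratio units per round trip, the same
        # order of magnitude as the EDOLLAR doctest value above.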
@diagnostic()
def _get_SR_cost_per_trade_for_instrument_percentage(
self, instrument_code: str
) -> float:
raw_costs = self.get_raw_cost_data(instrument_code)
block_price_multiplier = self.get_value_of_block_price_move(instrument_code)
average_price = self._recent_average_price(instrument_code)
notional_blocks_traded = 1
cost_in_percentage_terms = raw_costs.calculate_cost_percentage_terms(
blocks_traded=notional_blocks_traded,
block_price_multiplier=block_price_multiplier,
price=average_price,
)
return cost_in_percentage_terms
@diagnostic()
def _recent_average_price(self, instrument_code: str) -> float:
daily_price = self.get_daily_price(instrument_code)
start_date = self._date_one_year_before_end_of_price_index(instrument_code)
average_price = float(daily_price[start_date:].mean())
return average_price
@diagnostic()
def _date_one_year_before_end_of_price_index(self, instrument_code: str):
daily_price = self.get_daily_price(instrument_code)
last_date = daily_price.index[-1]
start_date = last_date - pd.DateOffset(years=1)
return start_date
@diagnostic()
def _recent_average_annual_perc_vol(self, instrument_code: str) -> float:
average_vol = self._recent_average_daily_vol(instrument_code)
avg_annual_vol = average_vol * ROOT_BDAYS_INYEAR
average_price = self._recent_average_price(instrument_code)
avg_annual_vol_perc = avg_annual_vol / average_price
return avg_annual_vol_perc
@diagnostic()
def _recent_average_daily_vol(self, instrument_code: str) -> float:
daily_vol = self.get_daily_returns_volatility(instrument_code)
start_date = self._date_one_year_before_end_of_price_index(instrument_code)
average_vol = float(daily_vol[start_date:].mean())
return average_vol
@property
def use_SR_costs(self) -> float:
return str2Bool(self.config.use_SR_costs)
| robcarver17/pysystemtrade | systems/accounts/account_costs.py | Python | gpl-3.0 | 12,395 | 0.002098 |
# coding=utf-8
from __future__ import unicode_literals
from .. import BaseProvider
class Provider(BaseProvider):
def ean(self, length=13):
code = [self.random_digit() for i in range(length - 1)]
if length not in (8, 13):
raise AssertionError("length can only be 8 or 13")
if length == 8:
weights = [3, 1, 3, 1, 3, 1, 3]
elif length == 13:
weights = [1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3]
weighted_sum = sum([x * y for x, y in zip(code, weights)])
check_digit = (10 - weighted_sum % 10) % 10
code.append(check_digit)
return ''.join(str(x) for x in code)
def ean8(self):
return self.ean(8)
def ean13(self):
return self.ean(13)
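# Hedged usage sketch (relies on the standard faker.Faker entry point, which
# proxies provider methods; not part of this module). The last digit is the
# check digit computed above so that the weighted digit sum is a multiple
# of 10:
#
#     from faker import Faker
#
#     fake = Faker()
#     code = fake.ean13()                # 13-digit string
#     digits = [int(c) for c in code]
#     weights = [1, 3] * 6 + [1]         # EAN-13 weights, check digit weight 1
#     assert sum(d * w for d, w in zip(digits, weights)) % 10 == 0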
| Nebucatnetzer/tamagotchi | pygame/lib/python3.4/site-packages/faker/providers/barcode/__init__.py | Python | gpl-2.0 | 761 | 0 |
from django.conf.urls.defaults import *
from django.conf import settings
prefix = settings.URL_PREFIX
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import RedirectView
urlpatterns = patterns('',) # init
# November 20, 2013
#
# We have three server types for now:
#
# 1) admin: shows only the admin, and live database (for stats)
#
# 2) local: shows everything, local database
#
# 3) live: shows only the site, and live database
#
#
# TODO: add a staging site + DB for testing features before live
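# Hedged illustration (example values, not this project's real settings): the
# three server types above are assumed to be selected by flags in the Django
# settings module, e.g.
#
#   ADMIN_SERVER = False
#   LOCAL_SERVER = True
#   URL_PREFIX = ''
#
# so that exactly one of the branches below is taken.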
if settings.ADMIN_SERVER:
# online admin: no other controllers are defined
from django.contrib import admin
admin.autodiscover()
urlpatterns += patterns('',
# standard admin urls
(r'^', include(admin.site.urls) ),
# April 8, 2014: hack to prevent 404 and 500 pages from throwing errors
url(r'^homepage$', RedirectView.as_view(url='/'), name='homepage'),
url(r'^contact$', RedirectView.as_view(url='/'), name='contact'),
)
else:
if settings.LOCAL_SERVER:
# load admin on LOCAL too, but on a sub-url path
from django.contrib import admin
admin.autodiscover()
# from myutils.adminextra import custom_admin_views
# from concepts.databrowse_load import *
urlpatterns += patterns('',
# Customized views for the application admin home
# (r'^'+prefix+'admin/(concepts/)$', custom_admin_views.concepts),
# (r'^'+prefix+'admin/contributions/$', poms_custom_admin_views.contributions),
# standard admin urls
(r'^'+prefix+'admin/', include(admin.site.urls) ),
# url(r'^'+prefix+'databrowse/(.*)', databrowse.site.root, name='databrowsehome'),
)
# standard urls for LOCAL & LIVE
urlpatterns += patterns('',
# Registration app
(r'^registration/', include('registration.backends.default.urls')),
# Koncepts app
url(r'^', include('koncepts.urls')),
)
if settings.LOCAL_SERVER: # ===> static files on local machine
urlpatterns += staticfiles_urlpatterns()
urlpatterns += patterns('',
(r'^media/uploads/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
| lambdamusic/testproject | konproj/urls.py | Python | gpl-2.0 | 2,206 | 0.047597 |
"""A pseudo-file mapped into memory
Provides better performance for frequent reads/writes,
and makes reading/writing easier via regions (windows)
of memory. Allows memory to be accessed via array reads/
writes as well.
"""
import mmap
import logging
log = logging.getLogger(__name__)
class ReadOnlyError(Exception):
"""The mapped file is flagged as read-only"""
def __init__(self, path):
super(ReadOnlyError, self).__init__('mapped file is flagged as read only: %s' % path)
class RegionOverflowError(Exception):
"""Data at an offset was requested but the offset was greater than the allocated size"""
def __init__(self, offset):
super(RegionOverflowError, self).__init__('region overflow offset: %d (did you allocate?)' % offset)
class Region(object):
"""A virtual region of mapped memory
This class is a 'faked' mmap() result that allows for the finer allocation of memory mappings
beyond/below what the filesystem really allows. It is backed by true mmap()'d pages and
uses magic methods to achieve the appearance of being an isolated region of memory."""
__slots__ = 'parent', 'base_offset', '__size', 'cursor'
def __init__(self, parent, base_offset, size):
self.parent = parent
self.base_offset = base_offset
self.__size = size
self.cursor = 0
def __len__(self):
return self.__size
def __str__(self):
return str(self.read(offset=0, length=len(self)))
def __enter__(self):
return self
def __exit__(self, tipo, value, traceback):
return self
def region(self, offset=-1, size=-1):
(offset, size) = self._sanitize_segment(offset, size)
return self.parent.region(self.base_offset + offset, size)
def _sanitize_segment(self, offset, length):
if offset >= len(self):
raise ValueError('offset falls outside region size')
elif offset < 0:
offset = self.cursor
if length == 0:
raise ValueError('length must be at least 1')
elif length < 0:
length = len(self) - offset
return (offset, length)
def read(self, length=-1, offset=-1, advance=True):
(offset, length) = self._sanitize_segment(offset, length)
offset += self.base_offset
result = self.parent.read(length, offset, advance=advance)
if advance:
self.cursor += len(result)
return result
def write(self, value, length=-1, offset=-1, advance=True):
if length < 0:
length = len(value)
        (offset, length) = self._sanitize_segment(offset, length)
offset += self.base_offset
result = self.parent.write(value, length, offset, advance=advance)
if advance:
self.cursor += result
return result
class MappedFile(Region):
"""Manages mmap()-ings of a file into vmem.
This class prevents virtual address space from growing too large by
re-using existing maps if the requested regions have already been mapped.
"""
def __init__(self, path, page_count, read_only=False):
# XXX TODO NOTE remove this line when write functionality is added.
read_only = True
# getting 'too many files open' error? increase the constant on the next line
# (must be an exponent of 2)
self._page_size = page_count * mmap.PAGESIZE
# make sure we're sane here - allocation granularity needs to divide into page size!
assert (self._page_size % mmap.ALLOCATIONGRANULARITY) == 0, 'page size is not a multiple of allocation granularity!'
self._file = open(path, 'r+b')
self._pages = dict()
self.read_only = read_only
self._path = path
self.cursor = 0
super(MappedFile, self).__init__(self, base_offset=0, size=len(self))
def __len__(self):
self._file.seek(0, 2)
size = self._file.tell()
return size
def __del__(self):
self.close()
def close(self):
"""Unmaps all mappings"""
for i in self._pages:
self._pages[i].close()
self._file.close()
def region(self, offset, size):
"""Requests a virtual region be 'allocated'"""
lower_page = offset - (offset % self._page_size)
upper_page = ((offset + size) // self._page_size) * self._page_size
lower_page_id = lower_page // self._page_size
upper_page_id = upper_page // self._page_size
# make sure we're mapped
for i in range(lower_page_id, upper_page_id + 1):
if i not in self._pages:
page_offset = i * self._page_size
page_size = min(self._page_size, len(self) - page_offset)
log.debug('mapping vfile page: id=%d offset=%d size=%d', i, page_offset, page_size)
self._pages[i] = mmap.mmap(self._file.fileno(), offset=page_offset, length=page_size)
# create a region
return Region(self, base_offset=offset, size=size)
def read(self, length=1, offset=-1, advance=True):
"""Reads data from the virtual region"""
(offset, length) = self._sanitize_segment(offset, length)
results = []
length = min(length, len(self))
abs_offset = offset
cur_page = abs_offset // self._page_size
abs_offset %= self._page_size
while length > 0:
readable = self._page_size - abs_offset
readable = min(readable, length)
results.append(self._pages[cur_page][abs_offset:abs_offset + readable])
length -= readable
abs_offset = 0
cur_page += 1
result = ''.join(results)
if advance:
self.cursor += len(result)
return result
def write(self, value, offset=-1, length=-1, advance=True):
if self.read_only:
raise ReadOnlyError(self._path)
# TODO
assert False, 'not implemented'
return 0
def __getitem__(self, offset):
if isinstance(offset, slice):
(start, fin, step) = offset.indices(len(self))
result = self.read(offset=start, length=fin - start)
if step not in [None, 1]:
result = result[::step]
return result
if not isinstance(offset, int):
raise TypeError('offset is not an integer: %s' % repr(offset))
if offset >= len(self):
raise RegionOverflowError(offset)
page = offset // self._page_size
rel_offset = offset % self._page_size
return self._pages[page][rel_offset]
def __setitem__(self, offset, value):
if self.read_only:
raise ReadOnlyError(self._path)
if isinstance(offset, slice):
raise ValueError('Slice assignment not supported in mapped files; assemble your data first and then write')
if not isinstance(offset, int):
raise TypeError('offset is not an integer: %s' % repr(offset))
if offset >= len(self):
raise RegionOverflowError(offset)
page = offset // self._page_size
rel_offset = offset % self._page_size
self._pages[page][rel_offset] = value
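if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module. It assumes the
    # Python 2 semantics this file appears to target (read() joins mmap
    # slices with str.join); the temporary file contents and the derived
    # page_count are arbitrary example values.
    import tempfile

    logging.basicConfig(level=logging.INFO)

    # Keep page_size a multiple of the allocation granularity on any platform.
    page_count = (mmap.ALLOCATIONGRANULARITY // mmap.PAGESIZE) or 1

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b'hello, mapped world')
        path = tmp.name

    mapped = MappedFile(path, page_count=page_count, read_only=True)
    region = mapped.region(offset=7, size=6)
    print(region.read())  # expected: 'mapped'
    mapped.close()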
| Qix-/starfuse | starfuse/fs/mapped_file.py | Python | mit | 7,231 | 0.001798 |
# Copyright (c) 2012-2016 Seafile Ltd.
from django.contrib import admin
# Register your models here.
| miurahr/seahub | seahub/role_permissions/admin.py | Python | apache-2.0 | 102 | 0 |
import logging
from datetime import datetime
import math
import numpy as np
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
from ..models import SchoolWithDescription2020
from ..utilities import init_flask, time_delta, chunks, ItmToWGS84
school_fields = {
"school_id": "סמל_מוסד",
"school_name": "שם_מוסד",
"municipality_name": "שם_רשות",
"yishuv_name": "שם_ישוב",
"institution_type": "סוג_מוסד",
"lowest_grade": "משכבה",
"highest_grade": "עד_שכבה",
"location_accuracy": "רמת_דיוק_מיקום",
"x": "X",
"y": "Y",
}
app = init_flask()
db = SQLAlchemy(app)
coordinates_converter = ItmToWGS84()
def get_numeric_value(value, func):
"""
:returns: value if parameter value exists OR None if the parameter value does not exist
"""
return func(value) if value and not np.isnan(value) else None
def get_str_value(value):
"""
:returns: value if parameter value exists OR None if the parameter value does not exist
"""
return value if value and value not in ["nan", "Nan", "NaN", "NAN"] else None
def get_schools_with_description(schools_description_filepath, schools_coordinates_filepath):
logging.info("\tReading schools description data from '%s'..." % schools_description_filepath)
df_schools = pd.read_excel(schools_description_filepath)
logging.info("\tReading schools coordinates data from '%s'..." % schools_coordinates_filepath)
df_coordinates = pd.read_excel(schools_coordinates_filepath)
schools = []
    # drop rows with a duplicate school_id
df_schools = df_schools.drop_duplicates(school_fields["school_id"])
# sort by school_id
df_schools = df_schools.sort_values(school_fields["school_id"], ascending=True)
all_schools_tuples = []
for _, school in df_schools.iterrows():
school_id = get_numeric_value(school[school_fields["school_id"]], int)
school_name = get_str_value(school[school_fields["school_name"]]).strip('"')
if school_id in list(df_coordinates[school_fields["school_id"]].values):
x_coord = df_coordinates.loc[
df_coordinates[school_fields["school_id"]] == school_id, school_fields["x"]
].values[0]
y_coord = df_coordinates.loc[
df_coordinates[school_fields["school_id"]] == school_id, school_fields["y"]
].values[0]
location_accuracy = get_str_value(
df_coordinates.loc[
df_coordinates[school_fields["school_id"]] == school_id,
school_fields["location_accuracy"],
].values[0]
)
else:
x_coord = None
y_coord = None
location_accuracy = None
if x_coord and not math.isnan(x_coord) and y_coord and not math.isnan(y_coord):
longitude, latitude = coordinates_converter.convert(x_coord, y_coord)
else:
longitude, latitude = (
None,
None,
) # otherwise yield will produce: UnboundLocalError: local variable referenced before assignment
# Don't insert duplicates of 'school_name','x', 'y'
school_tuple = (school_name, x_coord, y_coord)
if school_tuple in all_schools_tuples:
continue
else:
all_schools_tuples.append(school_tuple)
school = {
"school_id": get_numeric_value(school[school_fields["school_id"]], int),
"school_name": school_name,
"municipality_name": get_str_value(school[school_fields["municipality_name"]]),
"yishuv_name": get_str_value(school[school_fields["yishuv_name"]]),
"institution_type": get_str_value(school[school_fields["institution_type"]]),
"lowest_grade": get_str_value(school[school_fields["lowest_grade"]]),
"highest_grade": get_str_value(school[school_fields["highest_grade"]]),
"location_accuracy": location_accuracy,
"longitude": longitude,
"latitude": latitude,
"x": x_coord,
"y": y_coord,
}
if school["institution_type"] in [
"בית ספר",
"תלמוד תורה",
"ישיבה קטנה",
'בי"ס תורני',
"ישיבה תיכונית",
'בי"ס חקלאי',
'בי"ס רפואי',
'בי"ס כנסייתי',
"אולפנה",
'בי"ס אקסטרני',
'בי"ס קיבוצי',
"תלמוד תורה ליד מעיין חינוך התורני",
'בי"ס מושבי',
]:
schools.append(school)
return schools
def truncate_schools_with_description():
curr_table = "schools_with_description"
sql_truncate = "TRUNCATE TABLE " + curr_table
db.session.execute(sql_truncate)
db.session.commit()
logging.info("Truncated table " + curr_table)
def import_to_datastore(schools_description_filepath, schools_coordinates_filepath, batch_size):
    new_items = 0
    try:
assert batch_size > 0
started = datetime.now()
schools = get_schools_with_description(
schools_description_filepath, schools_coordinates_filepath
)
truncate_schools_with_description()
new_items = 0
logging.info("inserting " + str(len(schools)) + " new schools")
for schools_chunk in chunks(schools, batch_size):
db.session.bulk_insert_mappings(SchoolWithDescription2020, schools_chunk)
db.session.commit()
new_items += len(schools)
logging.info(f"\t{new_items} items in {time_delta(started)}")
return new_items
except Exception as exception:
error = f"Schools import succeeded partially with {new_items} schools. Got exception : {exception}"
raise Exception(error)
def parse(schools_description_filepath, schools_coordinates_filepath, batch_size):
started = datetime.now()
total = import_to_datastore(
schools_description_filepath=schools_description_filepath,
schools_coordinates_filepath=schools_coordinates_filepath,
batch_size=batch_size,
)
db.session.execute(
"UPDATE schools_with_description SET geom = ST_SetSRID(ST_MakePoint(longitude,latitude),4326)\
WHERE geom IS NULL;"
)
logging.info("Total: {0} schools in {1}".format(total, time_delta(started)))
| hasadna/anyway | anyway/parsers/schools_with_description_2020.py | Python | mit | 6,488 | 0.003967 |
"""
EventHandler handles all events. The handler sets on every object.
"""
import random
from muddery.utils import defines
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils import utils
from muddery.worlddata.data_sets import DATA_SETS
from django.conf import settings
from django.apps import apps
from evennia.utils import logger
PERMISSION_BYPASS_EVENTS = {perm.lower() for perm in settings.PERMISSION_BYPASS_EVENTS}
def get_event_additional_model():
"""
Set a dict of additional model names.
"""
additional_model = {}
# list event's additional data's model
for data_settings in DATA_SETS.event_additional_data:
for record in data_settings.objects.all():
key = record.serializable_value("key")
additional_model[key] = data_settings.model_name
return additional_model
class EventHandler(object):
"""
"""
_additional_model = get_event_additional_model()
def __init__(self, owner):
"""
Initialize the handler.
"""
self.owner = owner
self.events = {}
# Load events.
event_records = DATA_SETS.event_data.objects.filter(trigger_obj=owner.get_data_key())
for record in event_records:
event = {}
# Set data.
event_type = record.type
trigger_type = record.trigger_type
for field in record._meta.fields:
event[field.name] = record.serializable_value(field.name)
event["type"] = event_type
# Set additional data.
if record.key in self._additional_model:
model_name = self._additional_model[record.key]
model_additional = apps.get_model(settings.WORLD_DATA_APP, model_name)
try:
add_record = model_additional.objects.get(key = record.key)
# Set data.
for add_field in add_record._meta.fields:
event[add_field.name] = add_record.serializable_value(add_field.name)
                except Exception:
pass
if not trigger_type in self.events:
self.events[trigger_type] = []
self.events[trigger_type].append(event)
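        # Hedged illustration of the structure built above (field names come
        # from the loop, the values are invented):
        #
        #   self.events = {
        #       <EVENT_TRIGGER_ARRIVE>: [
        #           {"key": "event_01", "type": <EVENT_ATTACK>,
        #            "trigger_obj": "room_01", "condition": "...",
        #            # plus any per-type extra fields, e.g. for attack events:
        #            "odds": 0.5, "mob": "mob_01", "level": 1, "desc": "..."},
        #       ],
        #       ...
        #   }
        #
        # Events are grouped by trigger type; do_attack()/do_dialogue() below
        # read the extra fields from these dicts.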
def can_bypass(self, character):
"""
If the character can bypass the event.
"""
if not character:
return False
if character.player:
if character.player.is_superuser:
# superusers can bypass events
return True
for perm in character.player.permissions.all():
if perm in PERMISSION_BYPASS_EVENTS:
# has permission to bypass events
return True
#########################
#
# Event triggers
#
#########################
def get_function(self, event_type):
"""
Get the function of the event type.
"""
if event_type == defines.EVENT_ATTACK:
return self.do_attack
elif event_type == defines.EVENT_DIALOGUE:
return self.do_dialogue
def at_character_move_in(self, character):
"""
Called when a character moves in the event handler's owner, usually a room.
"""
if not character:
return
if self.can_bypass(character):
return
if defines.EVENT_TRIGGER_ARRIVE in self.events:
for event in self.events[defines.EVENT_TRIGGER_ARRIVE]:
# If has arrive event.
if STATEMENT_HANDLER.match_condition(event["condition"], character, self.owner):
# If matches the condition.
function = self.get_function(event["type"])
if function:
function(event, character)
def at_character_move_out(self, character):
"""
Called when a character moves out of a room.
"""
pass
def at_character_die(self):
"""
Called when a character is killed.
"""
owner = self.owner
if not owner:
return
if self.can_bypass(owner):
return
if defines.EVENT_TRIGGER_DIE in self.events:
for event in self.events[defines.EVENT_TRIGGER_DIE]:
                # If has die event.
if STATEMENT_HANDLER.match_condition(event["condition"], owner, None):
# If matches the condition, run event on the owner.
function = self.get_function(event["type"])
if function:
function(event, self)
def at_character_kill(self, killers):
"""
Called when a character kills others.
        This event is set on the character who is killed, and takes effect on the killer!
"""
if defines.EVENT_TRIGGER_KILL in self.events:
for event in self.events[defines.EVENT_TRIGGER_KILL]:
# If has kill event.
for killer in killers:
if self.can_bypass(killer):
continue
if STATEMENT_HANDLER.match_condition(event["condition"], killer, self.owner):
function = self.get_function(event["type"])
if function:
function(event, killer)
def at_character_traverse(self, character):
"""
Called before a character traverses an exit.
If returns true, the character can pass the exit, else the character can not pass the exit.
"""
if not character:
return True
if self.can_bypass(character):
return True
triggered = False
if defines.EVENT_TRIGGER_TRAVERSE in self.events:
for event in self.events[defines.EVENT_TRIGGER_TRAVERSE]:
# If has traverse event.
if STATEMENT_HANDLER.match_condition(event["condition"], character, self.owner):
# If matches the condition.
triggered = True
function = self.get_function(event["type"])
if function:
function(event, character)
return not triggered
def do_attack(self, event, character):
"""
Start a combat.
"""
rand = random.random()
# If matches the odds, put the character in combat.
# There can be several mods with different odds.
if rand <= event["odds"]:
# Attack mob.
character.attack_temp_target(event["mob"], event["level"], event["desc"])
def do_dialogue(self, event, character):
"""
Start a dialogue.
"""
# Get sentence.
npc = None
if event["npc"]:
npc = utils.search_obj_data_key(event["npc"])
if npc:
npc = npc[0]
character.show_dialogue(npc, event["dialogue"], 0)
| MarsZone/DreamLand | muddery/utils/event_handler.py | Python | bsd-3-clause | 7,071 | 0.003253 |
import unittest
from subprocess import call, DEVNULL
import time
from tests.docker import docker_util
class VMHelper(object):
def __init__(self, vm_name: str, shell: str = "", ssh_username: str = None, ssh_port: str = None):
self.vm_name = vm_name
self.shell = shell # like cmd.exe /c
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.use_ssh = self.ssh_username is not None and self.ssh_port is not None
self.__vm_is_up = False
def start_vm(self):
call('VBoxManage startvm "{0}"'.format(self.vm_name), shell=True)
def stop_vm(self, save=True):
if save:
call('VBoxManage controlvm "{0}" savestate'.format(self.vm_name), shell=True)
return
if self.use_ssh:
self.send_command("sudo shutdown -h now")
else:
call('VBoxManage controlvm "{0}" acpipowerbutton'.format(self.vm_name), shell=True)
def wait_for_vm_up(self):
if not self.__vm_is_up:
print("Waiting for {} to come up.".format(self.vm_name))
command = "ping -c 1" if self.use_ssh else "ping -n 1"
command += " github.com"
while self.__send_command(command, hide_output=True, print_command=False) != 0:
time.sleep(1)
self.__vm_is_up = True
def send_command(self, command: str) -> int:
self.wait_for_vm_up()
return self.__send_command(command)
def __send_command(self, command: str, hide_output=False, print_command=True) -> int:
if self.use_ssh:
fullcmd = ["ssh", "-p", str(self.ssh_port), "{0}@127.0.0.1".format(self.ssh_username), '"{0}"'.format(command)]
else:
fullcmd = ["VBoxManage", "guestcontrol", '"{0}"'.format(self.vm_name), "run"] \
+ self.shell.split(" ") \
+ ['"{0}"'.format(command)]
kwargs = {"stdout": DEVNULL, "stderr": DEVNULL} if hide_output else {}
fullcmd = " ".join(fullcmd)
if print_command:
print("\033[1m" + fullcmd + "\033[0m")
return call(fullcmd, shell=True, **kwargs)
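# Hedged usage sketch (the VM name, port and user mirror the OSX test below;
# the command is an example). Requires VirtualBox on the host and, for the
# SSH variant, a guest with sshd reachable on the forwarded port:
#
#     vm = VMHelper("OSX", ssh_port="3022", ssh_username="boss")
#     vm.start_vm()
#     rc = vm.send_command("python3 --version")   # waits for the VM first
#     vm.stop_vm(save=True)
#
# Without ssh_username/ssh_port, commands are routed through
# "VBoxManage guestcontrol ... run" with the configured shell prefix instead.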
class TestInstallation(unittest.TestCase):
def test_linux(self):
distributions = [
#"archlinux",
"debian8",
#"ubuntu1404",
"ubuntu1604",
#"kali",
# "gentoo" # cant test gentoo till this bug is fixed: https://github.com/docker/docker/issues/1916#issuecomment-184356102
]
for distribution in distributions:
self.assertTrue(docker_util.run_image(distribution, rebuild=False), msg=distribution)
def test_windows(self):
"""
Run the unittests on Windows + Install via Pip
To Fix Windows Error in Guest OS:
type gpedit.msc and go to:
Windows Settings
-> Security Settings
-> Local Policies
-> Security Options
-> Accounts: Limit local account use of blank passwords to console logon only
and set it to DISABLED.
configure pip on guest:
%APPDATA%\Roaming\pip
[global]
no-cache-dir = false
[uninstall]
yes = true
:return:
"""
target_dir = r"C:\urh"
vm_helper = VMHelper("Windows 10", shell="cmd.exe /c")
vm_helper.start_vm()
vm_helper.send_command("pip uninstall urh")
vm_helper.send_command("rd /s /q {0}".format(target_dir))
vm_helper.send_command("git clone https://github.com/jopohl/urh " + target_dir)
rc = vm_helper.send_command(r"python C:\urh\src\urh\cythonext\build.py")
self.assertEqual(rc, 0)
rc = vm_helper.send_command(r"py.test C:\urh\tests".format(target_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("pip install urh")
time.sleep(0.5)
rc = vm_helper.send_command("urh autoclose")
self.assertEqual(rc, 0)
vm_helper.send_command("pip uninstall urh")
vm_helper.stop_vm()
def test_osx(self):
"""
Run Unittests + Pip Installation on OSX
:return:
"""
vm_helper = VMHelper("OSX", ssh_port="3022", ssh_username="boss")
vm_helper.start_vm()
python_bin_dir = "/Library/Frameworks/Python.framework/Versions/3.5/bin/"
target_dir = "/tmp/urh"
vm_helper.send_command("rm -rf {0}".format(target_dir))
vm_helper.send_command("git clone https://github.com/jopohl/urh " + target_dir)
# Build extensions
rc = vm_helper.send_command("{0}python3 {1}/src/urh/cythonext/build.py".format(python_bin_dir, target_dir))
self.assertEqual(rc, 0)
# Run Unit tests
rc = vm_helper.send_command("{1}py.test {0}/tests".format(target_dir, python_bin_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("{0}pip3 --no-cache-dir install urh".format(python_bin_dir))
rc = vm_helper.send_command("{0}urh autoclose".format(python_bin_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("{0}pip3 uninstall --yes urh".format(python_bin_dir))
vm_helper.stop_vm()
| splotz90/urh | tests/TestInstallation.py | Python | gpl-3.0 | 5,216 | 0.004793 |
import pdb
def calc(i, n):
j = i * n
return j
def f(n):
for i in range(n):
j = calc(i, n)
print(i, j)
return
if __name__ == '__main__':
pdb.set_trace()
f(5)
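# Hedged illustration of a typical session (paths and line numbers abridged;
# the commands after the (Pdb) prompt are what you would type):
#
#     $ python pdb_next.py
#     -> f(5)
#     (Pdb) step        # step into f()
#     (Pdb) next        # run calc(i, n) without stepping into it
#     (Pdb) continue    # finish the program
#
# "next" treats the calc() call as one step, while "step" would descend
# into calc() itself.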
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_developer_tools/pdb_next.py | Python | apache-2.0 | 202 | 0.00495 |
# -*- coding: utf-8 -*-
from gi.repository import GLib, GObject, Gio
from dfeet import dbus_utils
def args_signature_markup(arg_signature):
return '<small><span foreground="#2E8B57">%s</span></small>' % (arg_signature)
def args_name_markup(arg_name):
return '<small>%s</small>' % (arg_name,)
class DBusNode(GObject.GObject):
"""object to represent a DBus Node (object path)"""
def __init__(self, name, object_path, node_info):
GObject.GObject.__init__(self)
self.__name = name
self.__object_path = object_path
self.__node_info = node_info # Gio.GDBusNodeInfo object
def __repr__(self):
return "Name: %s ; ObjPath: %s ; NodeInfo: %s" % (
self.name, self.object_path, self.node_info)
@property
def name(self):
return self.__name
@property
def object_path(self):
return self.__object_path
@property
def node_info(self):
return self.__node_info
class DBusInterface(DBusNode):
"""object to represent a DBus Interface"""
def __init__(self, dbus_node_obj, iface_info):
DBusNode.__init__(self, dbus_node_obj.name,
dbus_node_obj.object_path, dbus_node_obj.node_info)
self.__iface_info = iface_info # Gio.GDBusInterfaceInfo object
def __repr__(self):
return "iface '%s' on node '%s'" % (self.iface_info.name, self.node_info.path)
@property
def iface_info(self):
return self.__iface_info
class DBusProperty(DBusInterface):
"""object to represent a DBus Property"""
def __init__(self, dbus_iface_obj, property_info):
DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
self.__property_info = property_info # Gio.GDBusPropertyInfo object
self.__value = None # the value
def __repr__(self):
sig = dbus_utils.sig_to_string(self.property_info.signature)
return "%s %s (%s)" % (sig, self.property_info.name, self.property_info.flags)
@property
def property_info(self):
return self.__property_info
@property
def value(self):
return self.__value
@value.setter
def value(self, new_val):
self.__value = new_val
@property
def markup_str(self):
sig = dbus_utils.sig_to_string(self.property_info.signature)
readwrite = list()
if self.readable:
readwrite.append("read")
if self.writable:
readwrite.append("write")
s = "%s %s <small>(%s)</small>" % (
args_signature_markup(sig),
args_name_markup(self.property_info.name), " / ".join(readwrite))
if self.value is not None:
s += " = %s" % (GLib.markup_escape_text(str(self.value), -1),)
return s
@property
def readable(self):
if int(self.property_info.flags) == int(Gio.DBusPropertyInfoFlags.READABLE) or \
int(self.property_info.flags) == \
(int(Gio.DBusPropertyInfoFlags.WRITABLE | Gio.DBusPropertyInfoFlags.READABLE)):
return True
else:
return False
@property
def writable(self):
if int(self.property_info.flags) == int(Gio.DBusPropertyInfoFlags.WRITABLE) or \
int(self.property_info.flags) == \
(int(Gio.DBusPropertyInfoFlags.WRITABLE | Gio.DBusPropertyInfoFlags.READABLE)):
return True
else:
return False
class DBusSignal(DBusInterface):
"""object to represent a DBus Signal"""
def __init__(self, dbus_iface_obj, signal_info):
DBusInterface.__init__(self, dbus_iface_obj,
dbus_iface_obj.iface_info)
self.__signal_info = signal_info # Gio.GDBusSignalInfo object
def __repr__(self):
return "%s" % (self.signal_info.name)
@property
def signal_info(self):
return self.__signal_info
@property
def args(self):
args = list()
for arg in self.signal_info.args:
sig = dbus_utils.sig_to_string(arg.signature)
args.append({'signature': sig, 'name': arg.name})
return args
@property
def args_markup_str(self):
result = ''
result += '<span foreground="#FF00FF">(</span>'
result += ', '.join('%s' % (args_signature_markup(arg['signature'])) for arg in self.args)
result += '<span foreground="#FF00FF">)</span>'
return result
@property
def markup_str(self):
return "%s %s" % (self.signal_info.name, self.args_markup_str)
class DBusMethod(DBusInterface):
"""object to represent a DBus Method"""
def __init__(self, dbus_iface_obj, method_info):
DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
self.__method_info = method_info # Gio.GDBusMethodInfo object
def __repr__(self):
return "%s(%s) ↦ %s (%s)" % (
self.method_info.name, self.in_args_str,
self.out_args_str, DBusInterface.__repr__(self))
@property
def in_args_code(self):
in_args = ""
for a in self.__method_info.in_args:
in_args += a.signature
return in_args
@property
def method_info(self):
return self.__method_info
@property
def markup_str(self):
return "%s %s <b>↦</b> %s" % (
self.method_info.name, self.in_args_markup_str, self.out_args_markup_str)
@property
def in_args(self):
in_args = list()
for in_arg in self.method_info.in_args:
sig = dbus_utils.sig_to_string(in_arg.signature)
in_args.append({'signature': sig, 'name': in_arg.name})
return in_args
@property
def out_args(self):
out_args = list()
for out_arg in self.method_info.out_args:
sig = dbus_utils.sig_to_string(out_arg.signature)
out_args.append({'signature': sig, 'name': out_arg.name})
return out_args
@property
def in_args_str(self):
result = ""
for arg in self.in_args:
result += "%s %s, " % (arg['signature'], arg['name'])
return result[0:-2]
@property
def out_args_str(self):
result = ""
for arg in self.out_args:
result += "%s %s, " % (arg['signature'], arg['name'])
return result[0:-2]
def __args_markup_str(self, args):
"""markup a given list of args"""
result = ''
result += '<span foreground="#FF00FF">(</span>'
result += ', '.join(
'%s %s' % (
args_signature_markup(arg['signature']),
args_name_markup(arg['name'])) for arg in args)
result += '<span foreground="#FF00FF">)</span>'
return result
@property
def in_args_markup_str(self):
return self.__args_markup_str(self.in_args)
@property
def out_args_markup_str(self):
return self.__args_markup_str(self.out_args)
class DBusAnnotation(DBusInterface):
"""object to represent a DBus Annotation"""
def __init__(self, dbus_iface_obj, annotation_info):
DBusInterface.__init__(self, dbus_iface_obj,
dbus_iface_obj.iface_info)
self.__annotation_info = annotation_info # Gio.GDBusAnnotationInfo object
def __repr__(self):
return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)
@property
def annotation_info(self):
return self.__annotation_info
@property
def markup_str(self):
return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)
| GNOME/d-feet | src/dfeet/introspection_helper.py | Python | gpl-2.0 | 7,626 | 0.001574 |
from scrapelib import HTTPError
from openstates.utils import LXMLMixin
from pupa.scrape import Person, Scraper
class UTPersonScraper(Scraper, LXMLMixin):
def scrape(self):
PARTIES = {"R": "Republican", "D": "Democratic"}
representative_url = "http://house.utah.gov/rep/{}"
senator_url = "http://senate.utah.gov/senators/district{}.html"
json_link = "http://le.utah.gov/data/legislators.json"
person_json = self.get(json_link).json()
for info in person_json["legislators"]:
chamber = "lower" if info["house"] == "H" else "upper"
person = Person(
name=info["formatName"],
district=info["district"],
party=PARTIES[info["party"]],
image=info["image"],
primary_org=chamber,
)
person.add_source(json_link)
if chamber == "lower":
link = representative_url.format(info["id"])
else:
link = senator_url.format(info["district"])
try:
self.head(link)
except HTTPError:
self.logger.warning("Bad URL for {}".format(info["formatName"]))
else:
person.add_link(link)
address = info.get("address")
email = info.get("email")
fax = info.get("fax")
# Work phone seems to be the person's non-legislative
# office phone, and thus a last option
# For example, we called one and got the firm
# where he's a lawyer. We're picking
# them in order of how likely we think they are
# to actually get us to the person we care about.
phone = info.get("cell") or info.get("homePhone") or info.get("workPhone")
if address:
person.add_contact_detail(
type="address", value=address, note="District Office"
)
if phone:
person.add_contact_detail(
type="voice", value=phone, note="District Office"
)
if email:
person.add_contact_detail(
type="email", value=email, note="District Office"
)
if fax:
person.add_contact_detail(type="fax", value=fax, note="District Office")
BASE_FINANCE_URL = "http://www.disclosures.utah.gov/Search/PublicSearch"
conflicts_of_interest = info.get("CofI") or []
finance_reports = info.get("FinanceReport") or []
extra_links = []
for conflict in conflicts_of_interest:
extra_links.append(conflict["url"])
for finance in finance_reports:
# Some links are just to the base disclosure website
# Presumably, these members don't yet have their forms up
if finance != BASE_FINANCE_URL:
extra_links.append(finance["url"])
if extra_links:
person.extras["links"] = extra_links
yield person
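# Hedged illustration of the upstream JSON shape this scraper expects (key
# names are taken from the attribute accesses above; all values invented):
#
#   {"legislators": [
#       {"id": "ABC123", "formatName": "Jane Doe", "house": "H",
#        "district": "10", "party": "R", "image": "https://example.test/jd.jpg",
#        "address": "123 State St", "email": "jane@example.test", "fax": null,
#        "cell": null, "homePhone": null, "workPhone": "555-0100",
#        "CofI": [], "FinanceReport": []}
#   ]}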
| openstates/openstates | openstates/ut/people.py | Python | gpl-3.0 | 3,136 | 0.001276 |
"""
Helper functions for CP apps
"""
import six
from cherrypy._cpcompat import urljoin as _urljoin, urlencode as _urlencode
from cherrypy._cpcompat import basestring
import cherrypy
def expose(func=None, alias=None):
"""
Expose the function or class, optionally providing an alias or set of aliases.
"""
def expose_(func):
func.exposed = True
if alias is not None:
if isinstance(alias, basestring):
parents[alias.replace(".", "_")] = func
else:
for a in alias:
parents[a.replace(".", "_")] = func
return func
import sys
import types
decoratable_types = types.FunctionType, types.MethodType, type,
if six.PY2:
# Old-style classes are type types.ClassType.
decoratable_types += types.ClassType,
if isinstance(func, decoratable_types):
if alias is None:
# @expose
func.exposed = True
return func
else:
# func = expose(func, alias)
parents = sys._getframe(1).f_locals
return expose_(func)
elif func is None:
if alias is None:
# @expose()
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose(alias="alias") or
# @expose(alias=["alias1", "alias2"])
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose("alias") or
# @expose(["alias1", "alias2"])
parents = sys._getframe(1).f_locals
alias = func
return expose_
def popargs(*args, **kwargs):
"""A decorator for _cp_dispatch
(cherrypy.dispatch.Dispatcher.dispatch_method_name).
Optional keyword argument: handler=(Object or Function)
Provides a _cp_dispatch function that pops off path segments into
cherrypy.request.params under the names specified. The dispatch
is then forwarded on to the next vpath element.
Note that any existing (and exposed) member function of the class that
popargs is applied to will override that value of the argument. For
instance, if you have a method named "list" on the class decorated with
popargs, then accessing "/list" will call that function instead of popping
it off as the requested parameter. This restriction applies to all
_cp_dispatch functions. The only way around this restriction is to create
a "blank class" whose only function is to provide _cp_dispatch.
If there are path elements after the arguments, or more arguments
are requested than are available in the vpath, then the 'handler'
keyword argument specifies the next object to handle the parameterized
request. If handler is not specified or is None, then self is used.
If handler is a function rather than an instance, then that function
will be called with the args specified and the return value from that
function used as the next object INSTEAD of adding the parameters to
cherrypy.request.args.
This decorator may be used in one of two ways:
As a class decorator:
@cherrypy.popargs('year', 'month', 'day')
class Blog:
def index(self, year=None, month=None, day=None):
#Process the parameters here; any url like
#/, /2009, /2009/12, or /2009/12/31
#will fill in the appropriate parameters.
def create(self):
#This link will still be available at /create. Defined functions
#take precedence over arguments.
Or as a member of a class:
class Blog:
_cp_dispatch = cherrypy.popargs('year', 'month', 'day')
#...
The handler argument may be used to mix arguments with built in functions.
For instance, the following setup allows different activities at the
day, month, and year level:
class DayHandler:
def index(self, year, month, day):
#Do something with this day; probably list entries
def delete(self, year, month, day):
#Delete all entries for this day
@cherrypy.popargs('day', handler=DayHandler())
class MonthHandler:
def index(self, year, month):
#Do something with this month; probably list entries
def delete(self, year, month):
#Delete all entries for this month
@cherrypy.popargs('month', handler=MonthHandler())
class YearHandler:
def index(self, year):
#Do something with this year
#...
@cherrypy.popargs('year', handler=YearHandler())
class Root:
def index(self):
#...
"""
# Since keyword arg comes after *args, we have to process it ourselves
# for lower versions of python.
handler = None
handler_call = False
for k, v in kwargs.items():
if k == 'handler':
handler = v
else:
raise TypeError(
"cherrypy.popargs() got an unexpected keyword argument '{0}'"
.format(k)
)
import inspect
if handler is not None \
and (hasattr(handler, '__call__') or inspect.isclass(handler)):
handler_call = True
def decorated(cls_or_self=None, vpath=None):
if inspect.isclass(cls_or_self):
# cherrypy.popargs is a class decorator
cls = cls_or_self
setattr(cls, cherrypy.dispatch.Dispatcher.dispatch_method_name, decorated)
return cls
# We're in the actual function
self = cls_or_self
parms = {}
for arg in args:
if not vpath:
break
parms[arg] = vpath.pop(0)
if handler is not None:
if handler_call:
return handler(**parms)
else:
cherrypy.request.params.update(parms)
return handler
cherrypy.request.params.update(parms)
# If we are the ultimate handler, then to prevent our _cp_dispatch
# from being called again, we will resolve remaining elements through
# getattr() directly.
if vpath:
return getattr(self, vpath.pop(0), None)
else:
return self
return decorated
def url(path="", qs="", script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = _urlencode(qs)
if qs:
qs = '?' + qs
if cherrypy.request.app:
if not path.startswith("/"):
# Append/remove trailing slash from path_info as needed
# (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = cherrypy.request.path_info
if cherrypy.request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif cherrypy.request.is_index is False:
if pi.endswith('/') and pi != '/':
pi = pi[:-1]
if path == "":
path = pi
else:
path = _urljoin(pi, path)
if script_name is None:
script_name = cherrypy.request.script_name
if base is None:
base = cherrypy.request.base
newurl = base + script_name + path + qs
else:
# No request.app (we're being called outside a request).
# We'll have to guess the base from server.* attributes.
# This will produce very different results from the above
# if you're using vhosts or tools.proxy.
if base is None:
base = cherrypy.server.base()
path = (script_name or "") + path
newurl = base + path + qs
if './' in newurl:
# Normalize the URL by removing ./ and ../
atoms = []
for atom in newurl.split('/'):
if atom == '.':
pass
elif atom == '..':
atoms.pop()
else:
atoms.append(atom)
newurl = '/'.join(atoms)
# At this point, we should have a fully-qualified absolute URL.
if relative is None:
relative = getattr(cherrypy.request.app, "relative_urls", False)
# See http://www.ietf.org/rfc/rfc2396.txt
if relative == 'server':
# "A relative reference beginning with a single slash character is
# termed an absolute-path reference, as defined by <abs_path>..."
# This is also sometimes called "server-relative".
newurl = '/' + '/'.join(newurl.split('/', 3)[3:])
elif relative:
# "A relative reference that does not begin with a scheme name
# or a slash character is termed a relative-path reference."
old = url(relative=False).split('/')[:-1]
new = newurl.split('/')
while old and new:
a, b = old[0], new[0]
if a != b:
break
old.pop(0)
new.pop(0)
new = (['..'] * len(old)) + new
newurl = '/'.join(new)
return newurl
| jealousrobot/PlexArt | lib/cherrypy/_helper.py | Python | gpl-3.0 | 10,332 | 0.000194 |
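# Illustrative sketch (hypothetical): a minimal example of how the cherrypy.popargs
# decorator and the url() helper defined above are commonly used together. The class,
# handler and path names below are assumptions, not taken from any file in this dump.
import cherrypy
@cherrypy.popargs('band_id')
class Band(object):
    @cherrypy.expose
    def index(self, band_id=None):
        # cherrypy.url('albums') resolves relative to the current request path
        return 'Band %s, albums at %s' % (band_id, cherrypy.url('albums'))
if __name__ == '__main__':
    cherrypy.quickstart(Band())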
"""
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import logging
import os
import sys
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
from django.utils import importlib
from django.utils.module_loading import import_by_path
from django.utils import six
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except KeyError:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
self._configure_logging()
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def _configure_logging(self):
"""
Setup logging from LOGGING_CONFIG and LOGGING settings.
"""
if not sys.warnoptions:
try:
# Route warnings through python logging
logging.captureWarnings(True)
# Allow DeprecationWarnings through the warnings filters
warnings.simplefilter("default", DeprecationWarning)
except AttributeError:
# No captureWarnings on Python 2.6, DeprecationWarnings are on anyway
pass
if self.LOGGING_CONFIG:
from django.utils.log import DEFAULT_LOGGING
# First find the logging configuration function ...
logging_config_func = import_by_path(self.LOGGING_CONFIG)
logging_config_func(DEFAULT_LOGGING)
# ... then invoke it with the logging settings
if self.LOGGING:
logging_config_func(self.LOGGING)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
self._configure_logging()
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, six.string_types):
raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
"to a tuple, not a string.")
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except ImportError as e:
raise ImportError(
"Could not import settings '%s' (Is it on sys.path? Is there an import error in the settings file?): %s"
% (self.SETTINGS_MODULE, e)
)
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and \
isinstance(setting_value, six.string_types):
warnings.warn("The %s setting must be a tuple. Please fix your "
"settings, as auto-correction is now deprecated." % setting,
DeprecationWarning, stacklevel=2)
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
return super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
return super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return list(self.__dict__) + dir(self.default_settings)
settings = LazySettings()
| edisonlz/fruit | web_project/base/site-packages/django/conf/__init__.py | Python | apache-2.0 | 7,796 | 0.002437 |
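# Illustrative sketch (hypothetical): the manual-configuration path implemented by
# LazySettings.configure() above; the setting values here are arbitrary examples.
from django.conf import settings
if not settings.configured:
    settings.configure(DEBUG=True, ALLOWED_HOSTS=['localhost'])
print(settings.DEBUG)   # True, served from the UserSettingsHolder
print(settings.USE_TZ)  # unset values fall back to django.conf.global_settings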
from IPython.core import ipapi
from IPython.core import macro
ip = ipapi.get()
import os,pprint
def export(filename = None):
lines = ['import IPython.core.ipapi', 'ip = IPython.core.ipapi.get()','']
vars = ip.db.keys('autorestore/*')
vars.sort()
varstomove = []
get = ip.db.get
macros = []
variables = []
for var in vars:
k = os.path.basename(var)
v = get(var)
if k.startswith('_'):
continue
if isinstance(v, macro.Macro):
macros.append((k,v))
if type(v) in [int, str, float]:
variables.append((k,v))
if macros:
lines.extend(['# === Macros ===' ,''])
for k,v in macros:
lines.append("ip.defmacro('%s'," % k)
for line in v.value.splitlines():
lines.append(' ' + repr(line+'\n'))
lines.extend([')', ''])
if variables:
lines.extend(['','# === Variables ===',''])
for k,v in variables:
varstomove.append(k)
lines.append('%s = %s' % (k,repr(v)))
lines.append('ip.push("%s")' % (' '.join(varstomove)))
bkms = ip.db.get('bookmarks',{})
if bkms:
lines.extend(['','# === Bookmarks ===',''])
lines.append("ip.db['bookmarks'] = %s " % pprint.pformat(bkms, indent = 2) )
aliases = ip.db.get('stored_aliases', {} )
if aliases:
lines.extend(['','# === Alias definitions ===',''])
for k,v in aliases.items():
try:
lines.append("ip.define_alias('%s', %s)" % (k, repr(v[1])))
except (AttributeError, TypeError):
pass
env = ip.db.get('stored_env')
if env:
lines.extend(['','# === Stored env vars ===',''])
lines.append("ip.db['stored_env'] = %s " % pprint.pformat(env, indent = 2) )
out = '\n'.join(lines)
if filename:
open(filename,'w').write(out)
else:
print out
| sodafree/backend | build/ipython/IPython/quarantine/ipy_exportdb.py | Python | bsd-3-clause | 2,037 | 0.025037 |
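# Illustrative sketch (hypothetical): calling the export() helper above from inside a
# running legacy (Python 2 era) IPython session. The import path and file name are
# assumptions based on the module's location; an active IPython instance is required.
from IPython.quarantine.ipy_exportdb import export
export('exported_session.py')  # write macros, variables, bookmarks and aliases to a file
export()                       # or print the generated script to stdout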
"""passlib tests"""
| charukiewicz/beer-manager | venv/lib/python3.4/site-packages/passlib/tests/__init__.py | Python | mit | 20 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations about an ESXi host
description:
- This module can be used to manage firewall configurations about an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
- Aaron Longchamps (@alongchamps)
notes:
- Tested on vSphere 6.0, vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Additional IPs and networks can also be specified
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
- name: Manage IP and network based firewall permissions for ESXi
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: gdbserver
enabled: True
allowed_hosts:
- all_ip: False
ip_address:
192.168.20.10
- name: CIMHttpServer
enabled: True
allowed_hosts:
- all_ip: False
ip_network:
192.168.100.0/24
- name: remoteSerialPort
enabled: True
allowed_hosts:
- all_ip: False
ip_address:
192.168.100.11
ip_network:
192.168.200.0/24
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": False,
"desired_state": False,
"previous_state": True,
"allowed_hosts": {
"current_allowed_all": True,
"previous_allowed_all": True,
"desired_allowed_all": True,
"current_allowed_ip": [],
"previous_allowed_ip": [],
"desired_allowed_ip": [],
"current_allowed_networks": [],
"previous_allowed_networks": [],
"desired_allowed_networks": [],
}
},
"remoteSerialPort": {
"current_state": True,
"desired_state": True,
"previous_state": True,
"allowed_hosts": {
"current_allowed_all": False,
"previous_allowed_all": True,
"desired_allowed_all": False,
"current_allowed_ip": ["192.168.100.11"],
"previous_allowed_ip": [],
"desired_allowed_ip": ["192.168.100.11"],
"current_allowed_networks": ["192.168.200.0/24"],
"previous_allowed_networks": [],
"desired_allowed_networks": ["192.168.200.0/24"],
}
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
from ansible.module_utils.compat import ipaddress
class VmwareFirewallManager(PyVmomi):
def __init__(self, module):
super(VmwareFirewallManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.options = self.params.get('options', dict())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.firewall_facts = dict()
self.rule_options = self.module.params.get("rules")
self.gather_rule_set()
def gather_rule_set(self):
for host in self.hosts:
self.firewall_facts[host.name] = {}
firewall_system = host.configManager.firewallSystem
if firewall_system:
for rule_set_obj in firewall_system.firewallInfo.ruleset:
temp_rule_dict = dict()
temp_rule_dict['enabled'] = rule_set_obj.enabled
allowed_host = rule_set_obj.allowedHosts
rule_allow_host = dict()
rule_allow_host['ip_address'] = allowed_host.ipAddress
rule_allow_host['ip_network'] = [ip.network + "/" + str(ip.prefixLength) for ip in allowed_host.ipNetwork]
rule_allow_host['all_ip'] = allowed_host.allIp
temp_rule_dict['allowed_hosts'] = rule_allow_host
self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict
def ensure(self):
"""
Function to ensure rule set configuration
"""
fw_change_list = []
enable_disable_changed = False
allowed_ip_changed = False
results = dict(changed=False, rule_set_state=dict())
for host in self.hosts:
firewall_system = host.configManager.firewallSystem
if firewall_system is None:
continue
results['rule_set_state'][host.name] = dict()
for rule_option in self.rule_options:
rule_name = rule_option.get('name', None)
if rule_name is None:
self.module.fail_json(msg="Please specify rule.name for rule set"
" as it is required parameter.")
if rule_name not in self.firewall_facts[host.name]:
self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
rule_enabled = rule_option.get('enabled', None)
if rule_enabled is None:
self.module.fail_json(msg="Please specify rules.enabled for rule set"
" %s as it is required parameter." % rule_name)
                rule_config = rule_option.get('allowed_hosts', None)
                if not rule_config:
                    # Assumption: when the playbook does not restrict allowed hosts for
                    # this rule, fall back to an allow-all entry instead of failing on
                    # rule_config[0] below.
                    rule_config = [{'all_ip': True}]
                # validate IP addresses are valid
                if 'ip_address' in rule_config[0]:
                    for ip_addr in rule_config[0]['ip_address']:
                        try:
                            ipaddress.ip_address(ip_addr)
                        except ValueError:
                            self.module.fail_json(msg="The provided IP address %s is not a valid IP"
                                                      " for the rule %s" % (ip_addr, rule_name))
                # validate provided subnets are valid networks
                if 'ip_network' in rule_config[0]:
                    for ip_net in rule_config[0]['ip_network']:
                        try:
                            ipaddress.ip_network(ip_net)
                        except ValueError:
                            self.module.fail_json(msg="The provided network %s is not a valid network"
                                                      " for the rule %s" % (ip_net, rule_name))
current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
if current_rule_state != rule_enabled:
try:
if not self.module.check_mode:
if rule_enabled:
firewall_system.EnableRuleset(id=rule_name)
else:
firewall_system.DisableRuleset(id=rule_name)
# keep track of changes as we go
enable_disable_changed = True
except vim.fault.NotFound as not_found:
self.module.fail_json(msg="Failed to enable rule set %s as"
" rule set id is unknown : %s" % (rule_name,
to_native(not_found.msg)))
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to enabled rule set %s as an internal"
" error happened while reconfiguring"
" rule set : %s" % (rule_name,
to_native(host_config_fault.msg)))
# save variables here for comparison later and change tracking
# also covers cases where inputs may be null
permitted_networking = self.firewall_facts[host.name][rule_name]
rule_allows_all = permitted_networking['allowed_hosts']['all_ip']
playbook_allows_all = rule_config[0]['all_ip']
rule_allowed_ip = set(permitted_networking['allowed_hosts']['ip_address'])
playbook_allowed_ip = set(rule_config[0].get('ip_address', ''))
rule_allowed_networks = set(permitted_networking['allowed_hosts']['ip_network'])
playbook_allowed_networks = set(rule_config[0].get('ip_network', ''))
# compare what is configured on the firewall rule with what the playbook provides
allowed_all_ips_different = bool(rule_allows_all != playbook_allows_all)
ip_list_different = bool(rule_allowed_ip != playbook_allowed_ip)
ip_network_different = bool(rule_allowed_networks != playbook_allowed_networks)
# apply everything here in one function call
if allowed_all_ips_different is True or ip_list_different is True or ip_network_different is True:
try:
allowed_ip_changed = True
if not self.module.check_mode:
# setup spec
firewall_spec = vim.host.Ruleset.RulesetSpec()
firewall_spec.allowedHosts = vim.host.Ruleset.IpList()
firewall_spec.allowedHosts.allIp = rule_config[0].get('all_ip', True)
firewall_spec.allowedHosts.ipAddress = rule_config[0].get('ip_address', None)
firewall_spec.allowedHosts.ipNetwork = []
if 'ip_network' in rule_config[0].keys():
for allowed_network in rule_config[0].get('ip_network', None):
tmp_ip_network_spec = vim.host.Ruleset.IpNetwork()
tmp_ip_network_spec.network = allowed_network.split("/")[0]
tmp_ip_network_spec.prefixLength = int(allowed_network.split("/")[1])
firewall_spec.allowedHosts.ipNetwork.append(tmp_ip_network_spec)
firewall_system.UpdateRuleset(id=rule_name, spec=firewall_spec)
except vim.fault.NotFound as not_found:
self.module.fail_json(msg="Failed to configure rule set %s as"
" rule set id is unknown : %s" % (rule_name,
to_native(not_found.msg)))
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to configure rule set %s as an internal"
" error happened while reconfiguring"
" rule set : %s" % (rule_name,
to_native(host_config_fault.msg)))
except vim.fault.RuntimeFault as runtime_fault:
self.module.fail_json(msg="Failed to conifgure the rule set %s as a runtime"
" error happened while applying the reconfiguration:"
" %s" % (rule_name, to_native(runtime_fault.msg)))
results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
previous_state=current_rule_state,
desired_state=rule_enabled,
current_allowed_all=playbook_allows_all,
previous_allowed_all=permitted_networking['allowed_hosts']['all_ip'],
desired_allowed_all=playbook_allows_all,
current_allowed_ip=playbook_allowed_ip,
previous_allowed_ip=set(permitted_networking['allowed_hosts']['ip_address']),
desired_allowed_ip=playbook_allowed_ip,
current_allowed_networks=playbook_allowed_networks,
previous_allowed_networks=set(permitted_networking['allowed_hosts']['ip_network']),
desired_allowed_networks=playbook_allowed_networks
)
if enable_disable_changed or allowed_ip_changed:
fw_change_list.append(True)
if any(fw_change_list):
results['changed'] = True
self.module.exit_json(**results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
rules=dict(type='list', default=list(), required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True
)
vmware_firewall_manager = VmwareFirewallManager(module)
vmware_firewall_manager.ensure()
if __name__ == "__main__":
main()
| pgmillon/ansible | lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py | Python | gpl-3.0 | 17,107 | 0.003975 |
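# Illustrative sketch (hypothetical values): the set-based drift detection performed
# in VmwareFirewallManager.ensure() above, shown in isolation.
rule_allowed_ip = {"192.168.100.11"}                        # currently configured on the host
playbook_allowed_ip = {"192.168.100.11", "192.168.100.12"}  # requested by the playbook
ip_list_different = rule_allowed_ip != playbook_allowed_ip
print(ip_list_different)  # True -> the module would call firewall_system.UpdateRuleset()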
from __future__ import print_function, unicode_literals
import argparse
import json
import sys
import textwrap
from tagalog import io, stamp, tag, fields
from tagalog import shipper
parser = argparse.ArgumentParser(description=textwrap.dedent("""
Ship log data from STDIN to somewhere else, timestamping and preprocessing
each log entry into a JSON document along the way."""))
parser.add_argument('-t', '--tags', nargs='+',
help='Tag each request with the specified string tags')
parser.add_argument('-f', '--fields', nargs='+',
help='Add key=value fields specified to each request')
parser.add_argument('-s', '--shipper', default='redis',
help='Select the shipper to be used to ship logs')
parser.add_argument('--no-stamp', action='store_true')
parser.add_argument('--bulk', action='store_true',
help='Send log data in elasticsearch bulk format')
parser.add_argument('--bulk-index', default='logs',
help='Name of the elasticsearch index (default: logs)')
parser.add_argument('--bulk-type', default='message',
help='Name of the elasticsearch type (default: message)')
# TODO: make these the responsibility of the redis shipper
parser.add_argument('-k', '--key', default='logs')
parser.add_argument('-u', '--urls', nargs='+', default=['redis://localhost:6379'])
def main():
args = parser.parse_args()
shpr = shipper.get_shipper(args.shipper)(args)
msgs = io.messages(sys.stdin)
if not args.no_stamp:
msgs = stamp(msgs)
if args.tags:
msgs = tag(msgs, args.tags)
if args.fields:
msgs = fields(msgs, args.fields)
for msg in msgs:
payload = json.dumps(msg)
if args.bulk:
command = json.dumps({'index': {'_index': args.bulk_index, '_type': args.bulk_type}})
payload = '{0}\n{1}\n'.format(command, payload)
shpr.ship(payload)
if __name__ == '__main__':
main()
| nickstenning/tagalog | tagalog/command/logship.py | Python | mit | 1,992 | 0.002008 |
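# Illustrative sketch (hypothetical message): the elasticsearch bulk framing that
# main() above produces when --bulk is given.
import json
msg = {"@message": "hello world", "@timestamp": "2013-01-01T00:00:00Z"}
command = json.dumps({'index': {'_index': 'logs', '_type': 'message'}})
payload = '{0}\n{1}\n'.format(command, json.dumps(msg))
print(payload)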
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2007-2018 GPLV3
import os, sys, tarfile, io
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
def debug(s):
sys.stderr.write(s)
def protect(t):
lst = t.split('&')
t = "&".join(lst)
lst = t.split('<')
t = "<".join(lst)
lst = t.split('>')
t = ">".join(lst)
lst = t.split('"')
t = """.join(lst)
return t
head = """<?xml version="1.0" encoding="utf8"?>
<semantik version="1">
<color_schemes>
<color_scheme name="Color 0" inner="#fffe8d" border="#000000" text="#000000"/>
<color_scheme name="Color 1" inner="#91ffab" border="#000000" text="#000000"/>
<color_scheme name="Color 2" inner="#9bfffe" border="#000000" text="#000000"/>
<color_scheme name="Color 3" inner="#b8bbff" border="#000000" text="#000000"/>
<color_scheme name="Color 4" inner="#e0aaff" border="#000000" text="#000000"/>
<color_scheme name="Color 5" inner="#ffa6a6" border="#000000" text="#000000"/>
<color_scheme name="Color 6" inner="#ffd8a6" border="#000000" text="#000000"/>
<color_scheme name="Color 7" inner="#ffffff" border="#000000" text="#000000"/>
</color_schemes>
"""
textitem = """<item id="%s" summary="%s" text="%s" len="13" comment="%s" pic_location="" pic_caption="" pic_comment="" tbl_rows="0" tbl_cols="0" c1="%s" c2="%s" color="1" custom_name="" custom_border="#000000" custom_inner="#000000" custom_text="#000000">
<tblsettings rows="0" cols="0"/>
</item>
"""
lst_vars = ['id', 'summary', 'text', 'len', 'comment', 'pic_location', 'pic_caption', 'pic_comment', 'tbl_rows', 'tbl_cols', 'c1', 'c2', 'color', 'custom_name', 'custom_border', 'custom_inner', 'custom_text']
rep = {
'text':'text',
'summary':'summary',
'xpos':'c2',
'ypos':'c1',
'id':'id',
}
class FFHandler(ContentHandler):
def __init__(self):
self.buf = []
self.out = []
self.trucs = []
self.cur = 0
self.count = 1
self.ids = [] # stack
self.links = [] # tuples
def startElement(self, name, attrs):
self.buf = []
if name == 'map':
self.out.append(head)
if name == 'node':
self.count += 1
#self.cur += 1
#debug(str(self.cur))
id = self.count
if len(self.ids) > 0:
par = self.ids[-1]
self.links.append( (par, id) )
self.ids.append(id)
text = attrs.get('TEXT', '')
text = protect(text)
self.out.append('<item id="%d" summary="%s"/>\n' % (id, text))
def endElement(self, name):
txt = "".join(self.buf)
if name == 'node':
#self.cur -= 1
#debug(str(self.cur))
self.ids=self.ids[:-1]
elif name == 'map':
for (k, v) in self.links:
self.out.append('<link p="%d" v="%d"/>\n' % (k, v))
self.out.append('</semantik>')
def characters(self, cars):
self.buf.append(cars)
def parse_string(s):
parser = make_parser()
curHandler = FFHandler()
parser.setContentHandler(curHandler)
parser.parse(io.StringIO(str(s)))
return "".join(curHandler.out)
def parse_file(infile):
with open(infile, 'r', encoding='utf-8') as f:
txt = f.read()
truc = txt.replace('<?xml version="1.0" encoding="utf8"?>', '<?xml version="1.0" encoding="UTF-8"?>')
truc = parse_string(truc)
#file = open("/tmp/con.xml", "w")
#file.write(str(truc))
#file.close()
#debug(truc.encode('utf-8'))
return truc
| ita1024/semantik | src/filters/others.py | Python | gpl-3.0 | 3,251 | 0.03199 |
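# Illustrative sketch (hypothetical input): feeding a minimal Freemind document through
# parse_string() from the filter above; parse_string is assumed to be in scope
# (e.g. imported from that module).
freemind_xml = ('<?xml version="1.0" encoding="UTF-8"?>'
                '<map><node TEXT="root"><node TEXT="child"/></node></map>')
print(parse_string(freemind_xml))  # emits the <semantik> items plus a <link p="2" v="3"/>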
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
from typing import TYPE_CHECKING, Any, Callable, Collection, FrozenSet, Iterable, Optional, Union
from sqlalchemy import func
from airflow.exceptions import AirflowException
from airflow.models import BaseOperatorLink, DagBag, DagModel, DagRun, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.helpers import build_airflow_url_with_query
from airflow.utils.session import provide_session
from airflow.utils.state import State
class ExternalTaskSensorLink(BaseOperatorLink):
"""
    Operator link for ExternalTaskSensor. It allows users to access
    the DAG that ExternalTaskSensor waits on.
"""
name = 'External DAG'
def get_link(self, operator, dttm):
query = {"dag_id": operator.external_dag_id, "execution_date": dttm.isoformat()}
return build_airflow_url_with_query(query)
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a different DAG or a task in a different DAG to complete for a
specific logical date.
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: str
:param external_task_id: The task_id that contains the task you want to
wait for. If ``None`` (default value) the sensor waits for the DAG
:type external_task_id: str or None
:param external_task_ids: The list of task_ids that you want to wait for.
If ``None`` (default value) the sensor waits for the DAG. Either
external_task_id or external_task_ids can be passed to
ExternalTaskSensor, but not both.
:type external_task_ids: Iterable of task_ids or None, default is None
:param allowed_states: Iterable of allowed states, default is ``['success']``
:type allowed_states: Iterable
:param failed_states: Iterable of failed or dis-allowed states, default is ``None``
:type failed_states: Iterable
:param execution_delta: time difference with the previous execution to
look at, the default is the same logical date as the current task or DAG.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: Optional[datetime.timedelta]
:param execution_date_fn: function that receives the current execution's logical date as the first
positional argument and optionally any number of keyword arguments available in the
context dictionary, and returns the desired logical dates to query.
Either execution_delta or execution_date_fn can be passed to ExternalTaskSensor,
but not both.
:type execution_date_fn: Optional[Callable]
:param check_existence: Set to `True` to check if the external task exists (when
external_task_id is not None) or check if the DAG to wait for exists (when
external_task_id is None), and immediately cease waiting if the external task
or DAG does not exist (default value: False).
:type check_existence: bool
"""
template_fields = ['external_dag_id', 'external_task_id']
ui_color = '#19647e'
@property
def operator_extra_links(self):
"""Return operator extra links"""
return [ExternalTaskSensorLink()]
def __init__(
self,
*,
external_dag_id: str,
external_task_id: Optional[str] = None,
external_task_ids: Optional[Collection[str]] = None,
allowed_states: Optional[Iterable[str]] = None,
failed_states: Optional[Iterable[str]] = None,
execution_delta: Optional[datetime.timedelta] = None,
execution_date_fn: Optional[Callable] = None,
check_existence: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.allowed_states = list(allowed_states) if allowed_states else [State.SUCCESS]
self.failed_states = list(failed_states) if failed_states else []
total_states = set(self.allowed_states + self.failed_states)
if set(self.failed_states).intersection(set(self.allowed_states)):
raise AirflowException(
f"Duplicate values provided as allowed "
f"`{self.allowed_states}` and failed states `{self.failed_states}`"
)
if external_task_id is not None and external_task_ids is not None:
raise ValueError(
'Only one of `external_task_id` or `external_task_ids` may '
'be provided to ExternalTaskSensor; not both.'
)
if external_task_id is not None:
external_task_ids = [external_task_id]
if external_task_ids:
if not total_states <= set(State.task_states):
raise ValueError(
f'Valid values for `allowed_states` and `failed_states` '
f'when `external_task_id` or `external_task_ids` is not `None`: {State.task_states}'
)
if len(external_task_ids) > len(set(external_task_ids)):
raise ValueError('Duplicate task_ids passed in external_task_ids parameter')
elif not total_states <= set(State.dag_states):
raise ValueError(
f'Valid values for `allowed_states` and `failed_states` '
f'when `external_task_id` is `None`: {State.dag_states}'
)
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
'Only one of `execution_delta` or `execution_date_fn` may '
'be provided to ExternalTaskSensor; not both.'
)
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
self.external_task_ids = external_task_ids
self.check_existence = check_existence
self._has_checked_existence = False
@provide_session
def poke(self, context, session=None):
if self.execution_delta:
dttm = context['logical_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self._handle_execution_date_fn(context=context)
else:
dttm = context['logical_date']
dttm_filter = dttm if isinstance(dttm, list) else [dttm]
serialized_dttm_filter = ','.join(dt.isoformat() for dt in dttm_filter)
self.log.info(
'Poking for tasks %s in dag %s on %s ... ',
self.external_task_ids,
self.external_dag_id,
serialized_dttm_filter,
)
# In poke mode this will check dag existence only once
if self.check_existence and not self._has_checked_existence:
self._check_for_existence(session=session)
count_allowed = self.get_count(dttm_filter, session, self.allowed_states)
count_failed = -1
if self.failed_states:
count_failed = self.get_count(dttm_filter, session, self.failed_states)
if count_failed == len(dttm_filter):
if self.external_task_ids:
raise AirflowException(
f'Some of the external tasks {self.external_task_ids} '
f'in DAG {self.external_dag_id} failed.'
)
else:
raise AirflowException(f'The external DAG {self.external_dag_id} failed.')
return count_allowed == len(dttm_filter)
def _check_for_existence(self, session) -> None:
dag_to_wait = session.query(DagModel).filter(DagModel.dag_id == self.external_dag_id).first()
if not dag_to_wait:
raise AirflowException(f'The external DAG {self.external_dag_id} does not exist.')
if not os.path.exists(dag_to_wait.fileloc):
raise AirflowException(f'The external DAG {self.external_dag_id} was deleted.')
if self.external_task_ids:
refreshed_dag_info = DagBag(dag_to_wait.fileloc).get_dag(self.external_dag_id)
for external_task_id in self.external_task_ids:
if not refreshed_dag_info.has_task(external_task_id):
raise AirflowException(
f'The external task {external_task_id} in '
f'DAG {self.external_dag_id} does not exist.'
)
self._has_checked_existence = True
def get_count(self, dttm_filter, session, states) -> int:
"""
Get the count of records against dttm filter and states
:param dttm_filter: date time filter for execution date
:type dttm_filter: list
:param session: airflow session object
:type session: SASession
:param states: task or dag states
:type states: list
:return: count of record against the filters
"""
TI = TaskInstance
DR = DagRun
if self.external_task_ids:
count = (
session.query(func.count()) # .count() is inefficient
.filter(
TI.dag_id == self.external_dag_id,
TI.task_id.in_(self.external_task_ids),
TI.state.in_(states),
TI.execution_date.in_(dttm_filter),
)
.scalar()
)
count = count / len(self.external_task_ids)
else:
count = (
session.query(func.count())
.filter(
DR.dag_id == self.external_dag_id,
DR.state.in_(states),
DR.execution_date.in_(dttm_filter),
)
.scalar()
)
return count
def _handle_execution_date_fn(self, context) -> Any:
"""
        Handle backwards compatibility: previously this operator passed only the
        execution date to execution_date_fn, while the newer implementation also
        passes all context variables as keyword arguments, allowing more
        sophisticated selections of dates to be returned.
"""
from airflow.utils.operator_helpers import make_kwargs_callable
# Remove "logical_date" because it is already a mandatory positional argument
logical_date = context["logical_date"]
kwargs = {k: v for k, v in context.items() if k not in {"execution_date", "logical_date"}}
# Add "context" in the kwargs for backward compatibility (because context used to be
# an acceptable argument of execution_date_fn)
kwargs["context"] = context
if TYPE_CHECKING:
assert self.execution_date_fn is not None
kwargs_callable = make_kwargs_callable(self.execution_date_fn)
return kwargs_callable(logical_date, **kwargs)
class ExternalTaskMarker(DummyOperator):
"""
Use this operator to indicate that a task on a different DAG depends on this task.
When this task is cleared with "Recursive" selected, Airflow will clear the task on
the other DAG and its downstream tasks recursively. Transitive dependencies are followed
until the recursion_depth is reached.
:param external_dag_id: The dag_id that contains the dependent task that needs to be cleared.
:type external_dag_id: str
:param external_task_id: The task_id of the dependent task that needs to be cleared.
:type external_task_id: str
:param execution_date: The logical date of the dependent task execution that needs to be cleared.
:type execution_date: str or datetime.datetime
:param recursion_depth: The maximum level of transitive dependencies allowed. Default is 10.
This is mostly used for preventing cyclic dependencies. It is fine to increase
this number if necessary. However, too many levels of transitive dependencies will make
it slower to clear tasks in the web UI.
"""
template_fields = ['external_dag_id', 'external_task_id', 'execution_date']
ui_color = '#19647e'
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: Optional[FrozenSet[str]] = None
def __init__(
self,
*,
external_dag_id: str,
external_task_id: str,
execution_date: Optional[Union[str, datetime.datetime]] = "{{ logical_date.isoformat() }}",
recursion_depth: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
if isinstance(execution_date, datetime.datetime):
self.execution_date = execution_date.isoformat()
elif isinstance(execution_date, str):
self.execution_date = execution_date
else:
raise TypeError(
f'Expected str or datetime.datetime type for execution_date. Got {type(execution_date)}'
)
if recursion_depth <= 0:
raise ValueError("recursion_depth should be a positive integer")
self.recursion_depth = recursion_depth
@classmethod
def get_serialized_fields(cls):
"""Serialized ExternalTaskMarker contain exactly these fields + templated_fields ."""
if not cls.__serialized_fields:
cls.__serialized_fields = frozenset(super().get_serialized_fields() | {"recursion_depth"})
return cls.__serialized_fields
| mistercrunch/airflow | airflow/sensors/external_task.py | Python | apache-2.0 | 14,311 | 0.003005 |
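# Illustrative sketch (hypothetical DAG and task ids): how the ExternalTaskSensor
# defined above is typically wired into a downstream DAG.
import datetime
import pendulum
from airflow import DAG
from airflow.sensors.external_task import ExternalTaskSensor
with DAG(
    dag_id="downstream_dag",
    start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
    schedule_interval="@daily",
) as dag:
    wait_for_upstream = ExternalTaskSensor(
        task_id="wait_for_upstream",
        external_dag_id="upstream_dag",
        external_task_id="final_task",
        allowed_states=["success"],
        execution_delta=datetime.timedelta(hours=1),
        mode="reschedule",
    )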
# -*- coding: utf-8 -*-
#
# petl documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 19 11:16:43 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import petl
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.imgmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx_issues']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
issues_github_path = 'petl-developers/petl'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'petl'
copyright = u'2014, Alistair Miles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = petl.__version__
# The full version, including alpha/beta/rc tags.
release = petl.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'examples', 'notes', 'bin', 'dist']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'petldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'petl.tex', u'petl Documentation',
u'Alistair Miles', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'petl', u'petl Documentation',
[u'Alistair Miles'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
# disable temporarily
# intersphinx_mapping = {'http://docs.python.org/': None}
| alimanfoo/petl | docs/conf.py | Python | mit | 7,473 | 0.006423 |
from distutils.core import setup
setup(
name='PyMonad',
version='1.3',
author='Jason DeLaat',
author_email='jason.develops@gmail.com',
packages=['pymonad', 'pymonad.test'],
url='https://bitbucket.org/jason_delaat/pymonad',
license=open('LICENSE.txt').read(),
description='Collection of classes for programming with functors, applicative functors and monads.',
long_description=open('README.txt').read() + open("CHANGES.txt").read(),
classifiers=[ "Intended Audience :: Developers"
, "License :: OSI Approved :: BSD License"
, "Operating System :: OS Independent"
, "Programming Language :: Python :: 2.7"
, "Programming Language :: Python :: 3"
, "Topic :: Software Development"
, "Topic :: Software Development :: Libraries"
, "Topic :: Utilities"
],
)
| fnl/pymonad | setup.py | Python | bsd-3-clause | 824 | 0.033981 |
# Copyright 2014 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from trove.guestagent.strategy import Strategy
from trove.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_replication_strategy(replication_driver, ns=__name__):
LOG.debug("Getting replication strategy: %s.", replication_driver)
return Strategy.get_strategy(replication_driver, ns)
| CMSS-BCRDB/RDSV1.0 | trove/guestagent/strategies/replication/__init__.py | Python | apache-2.0 | 955 | 0 |
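# Illustrative sketch (hypothetical names): resolving a replication strategy class
# through the get_replication_strategy() helper above. The strategy class name and
# namespace below are assumptions, not verified values from this codebase.
repl_strategy_cls = get_replication_strategy(
    'MysqlBinlogReplication',
    ns='trove.guestagent.strategies.replication.mysql_binlog')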
"""
This module defines the attributes of the
PyPI package for the mbed SDK test suite ecosystem tools
"""
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import os
from distutils.core import setup
from setuptools import find_packages
DESCRIPTION = "mbed-ls is a Python module that detects and lists mbed-enabled devices connected to the host computer"
OWNER_NAMES = 'Przemyslaw Wirkus, Johan Seferidis, James Crosby'
OWNER_EMAILS = 'Przemyslaw.Wirkus@arm.com, Johan.Seferidis@arm.com, James.Crosby@arm.com'
# Utility function to cat in a file (used for the README)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='mbed-ls',
version='0.1.19',
description=DESCRIPTION,
long_description=read('README.md'),
author=OWNER_NAMES,
author_email=OWNER_EMAILS,
maintainer=OWNER_NAMES,
maintainer_email=OWNER_EMAILS,
url='https://github.com/ARMmbed/mbed-ls',
packages=find_packages(),
license="Apache-2.0",
test_suite = 'test',
entry_points={
"console_scripts": [
"mbedls=mbed_lstools:mbedls_main",
],
},
install_requires=["PrettyTable>=0.7.2"])
| bridadan/mbed-ls | setup.py | Python | apache-2.0 | 1,786 | 0.004479 |
from collections.abc import Iterable
from TM1py.Services import CellService
from TM1py.Services import ElementService
from TM1py.Utils import require_pandas
try:
import pandas as pd
_has_pandas = True
except ImportError:
_has_pandas = False
class PowerBiService:
def __init__(self, tm1_rest):
"""
:param tm1_rest: instance of RestService
"""
self._tm1_rest = tm1_rest
self.cells = CellService(tm1_rest)
self.elements = ElementService(tm1_rest)
@require_pandas
def execute_mdx(self, mdx, **kwargs) -> 'pd.DataFrame':
return self.cells.execute_mdx_dataframe_shaped(mdx, **kwargs)
@require_pandas
def execute_view(self, cube_name, view_name, private, **kwargs) -> 'pd.DataFrame':
return self.cells.execute_view_dataframe_shaped(cube_name, view_name, private, **kwargs)
@require_pandas
def get_member_properties(self, dimension_name: str, hierarchy_name: str, member_selection: Iterable = None,
skip_consolidations: bool = True, attributes: Iterable = None,
skip_parents: bool = False, level_names=None,
parent_attribute: str = None) -> 'pd.DataFrame':
"""
:param dimension_name: Name of the dimension
:param hierarchy_name: Name of the hierarchy in the dimension
:param member_selection: Selection of members. Iterable or valid MDX string
:param skip_consolidations: Boolean flag to skip consolidations
:param attributes: Selection of attributes. Iterable. If None retrieve all.
:param level_names: List of labels for parent columns. If None use level names from TM1.
        :param skip_parents: Boolean flag to skip parent columns.
:param parent_attribute: Attribute to be displayed in parent columns. If None, parent name is used.
:return: pandas DataFrame
"""
if not member_selection:
member_selection = f"{{ [{dimension_name}].[{hierarchy_name}].Members }}"
if skip_consolidations:
member_selection = f"{{ Tm1FilterByLevel({member_selection}, 0) }}"
if not isinstance(member_selection, str):
if isinstance(member_selection, Iterable):
member_selection = "{" + ",".join(f"[{dimension_name}].[{member}]" for member in member_selection) + "}"
else:
raise ValueError("Argument 'element_selection' must be None or str")
if not self.elements.attribute_cube_exists(dimension_name):
raise RuntimeError(self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name + " cube must exist")
members = [tupl[0] for tupl in self.elements.execute_set_mdx(
mdx=member_selection,
element_properties=None,
member_properties=("Name", "UniqueName"),
parent_properties=None)]
element_types = self.elements.get_element_types(
dimension_name=dimension_name,
hierarchy_name=hierarchy_name,
skip_consolidations=skip_consolidations)
df = pd.DataFrame(
data=[(member["Name"], element_types[member["Name"]])
for member
in members
if member["Name"] in element_types],
dtype=str,
columns=[dimension_name, 'Type'])
calculated_members_definition = list()
calculated_members_selection = list()
if not skip_parents:
levels = self.elements.get_levels_count(dimension_name, hierarchy_name)
# potential custom parent names
if not level_names:
level_names = self.elements.get_level_names(dimension_name, hierarchy_name, descending=True)
for parent in range(1, levels, 1):
name_or_attribute = f"Properties('{parent_attribute}')" if parent_attribute else "Name"
member = f"""
MEMBER [{self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name}].[{level_names[parent]}]
AS [{dimension_name}].CurrentMember.{'Parent.' * parent}{name_or_attribute}
"""
calculated_members_definition.append(member)
calculated_members_selection.append(
f"[{self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name}].[{level_names[parent]}]")
if attributes is None:
column_selection = "{Tm1SubsetAll([" + self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name + "])}"
else:
column_selection = "{" + ",".join(
"[" + self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name + "].[" + attribute + "]"
for attribute
in attributes) + "}"
if calculated_members_selection:
column_selection = column_selection + " + {" + ",".join(calculated_members_selection) + "}"
member_selection = ",".join(
member["UniqueName"]
for member
in members)
mdx_with_block = ""
if calculated_members_definition:
mdx_with_block = "WITH " + " ".join(calculated_members_definition)
mdx = f"""
{mdx_with_block}
SELECT
{{ {member_selection} }} ON ROWS,
{{ {column_selection} }} ON COLUMNS
FROM [{self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name}]
"""
df_data = self.execute_mdx(mdx)
# override hierarchy name
        df_data.rename(columns={hierarchy_name: dimension_name}, inplace=True)
# shift levels to right hand side
if not skip_parents:
# skip max level (= leaves)
level_names = level_names[1:]
# iterative approach
for _ in level_names:
rows_to_shift = df_data[df_data[level_names[-1]] == ''].index
if rows_to_shift.empty:
break
df_data.iloc[rows_to_shift, -len(level_names):] = df_data.iloc[rows_to_shift, -len(level_names):].shift(
1, axis=1)
df_data.iloc[:, -len(level_names):] = df_data.iloc[:, -len(level_names):].fillna('')
return pd.merge(df, df_data, on=dimension_name).drop_duplicates()
| OLAPLINE/TM1py | TM1py/Services/PowerBiService.py | Python | mit | 6,310 | 0.004596 |
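# Illustrative sketch (hypothetical connection details and dimension name): pulling a
# member-property DataFrame through the PowerBiService above. TM1Service is assumed
# to expose this service as the `power_bi` attribute.
from TM1py import TM1Service
with TM1Service(address="localhost", port=12354, user="admin", password="apple", ssl=True) as tm1:
    df = tm1.power_bi.get_member_properties(
        dimension_name="Region",
        hierarchy_name="Region",
        skip_consolidations=True,
        skip_parents=False)
    print(df.head())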
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from .core import *
| maxolasersquad/ninja-ide | ninja_ide/core/__init__.py | Python | gpl-3.0 | 712 | 0.001404 |
# -*- coding: utf-8 -*-
'''
The service module for OpenBSD
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin
# Import Salt libs
import salt.utils
log = logging.getLogger(__name__)
# XXX enable/disable support would be nice
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'reload_': 'reload'
}
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
krel = list(list(map(int, __grains__['kernelrelease'].split('.'))))
# The -f flag, used to force a script to run even if disabled,
# was added after the 5.0 release.
# the rcctl(8) command is the preferred way to manage services.
if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0):
if not os.path.exists('/usr/sbin/rcctl'):
return __virtualname__
return (False, 'The openbsdservice execution module cannot be loaded: '
'only available on OpenBSD systems.')
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
def reload_(name):
'''
.. versionadded:: 2014.7.0
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = '/etc/rc.d/{0} -f reload'.format(name)
return not __salt__['cmd.retcode'](cmd)
import re
service_flags_regex = re.compile(r'^\s*(\w[\d\w]*)_flags=(?:(NO)|.*)$')
pkg_scripts_regex = re.compile(r'^\s*pkg_scripts=\'(.*)\'$')
start_daemon_call_regex = re.compile(r'(\s*start_daemon(?!\(\)))')
start_daemon_parameter_regex = re.compile(r'(?:\s+(\w[\w\d]*))')
def _get_rc():
'''
Returns a dict where the key is the daemon's name and
the value a boolean indicating its status (True: enabled or False: disabled).
Check the daemons started by the system in /etc/rc and
configured in /etc/rc.conf and /etc/rc.conf.local.
    Also add to the dict all the locally enabled daemons via $pkg_scripts.
'''
daemons_flags = {}
try:
# now read the system startup script /etc/rc
        # to know which daemons the system enables
with salt.utils.fopen('/etc/rc', 'r') as handle:
lines = handle.readlines()
except IOError:
log.error('Unable to read /etc/rc')
else:
for line in lines:
match = start_daemon_call_regex.match(line)
if match:
# the matched line is a call to start_daemon()
# we remove the function name
line = line[len(match.group(1)):]
# we retrieve each daemon name from the parameters of start_daemon()
for daemon in start_daemon_parameter_regex.findall(line):
# mark it as enabled
daemons_flags[daemon] = True
# this will execute rc.conf and rc.conf.local
# used in /etc/rc at boot to start the daemons
variables = __salt__['cmd.run']('(. /etc/rc.conf && set)',
clean_env=True,
output_loglevel='quiet',
python_shell=True).split('\n')
for var in variables:
match = service_flags_regex.match(var)
if match:
            # the matched var looks like daemon_name_flags=...; we test its assigned value
# NO: disabled, everything else: enabled
# do not create a new key if the service hasn't been found in /etc/rc, see $pkg_scripts
if match.group(2) == 'NO':
daemons_flags[match.group(1)] = False
else:
match = pkg_scripts_regex.match(var)
if match:
# the matched var is pkg_scripts
                # we can retrieve the name of each locally enabled daemon that wasn't hand-started via /etc/rc
for daemon in match.group(1).split():
# create a new key and mark it as enabled
daemons_flags[daemon] = True
return daemons_flags
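# Illustrative sketch (not part of the original module; the file contents are
# hypothetical): if /etc/rc calls `start_daemon sshd`, and rc.conf.local sets
# `ntpd_flags=NO` and `pkg_scripts='messagebus cupsd'`, then _get_rc() would
# return something like
#   {'sshd': True, 'ntpd': False, 'messagebus': True, 'cupsd': True}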
def available(name):
'''
.. versionadded:: 2014.7.0
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
path = '/etc/rc.d/{0}'.format(name)
return os.path.isfile(path) and os.access(path, os.X_OK)
def missing(name):
'''
.. versionadded:: 2014.7.0
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return not available(name)
def get_all():
'''
.. versionadded:: 2014.7.0
Return all available boot services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
services = []
if not os.path.isdir('/etc/rc.d'):
return services
for service in os.listdir('/etc/rc.d'):
# this will remove rc.subr and all non executable files
if available(service):
services.append(service)
return sorted(services)
def get_enabled():
'''
.. versionadded:: 2014.7.0
Return a list of service that are enabled on boot
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services))
def enabled(name, **kwargs):
'''
.. versionadded:: 2014.7.0
Return True if the named service is enabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
return name in get_enabled()
def get_disabled():
'''
.. versionadded:: 2014.7.0
Return a set of services that are installed but disabled
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if not is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services))
def disabled(name):
'''
.. versionadded:: 2014.7.0
Return True if the named service is disabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return name in get_disabled()
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/openbsdservice.py | Python | apache-2.0 | 8,008 | 0.000999 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Sum, Count, Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
import django.views.defaults
from weblate.trans.models import (
Project, SubProject, Translation, Check,
Dictionary, Change, Unit, WhiteboardMessage
)
from weblate.requirements import get_versions, get_optional_versions
from weblate.lang.models import Language
from weblate.trans.forms import (
get_upload_form, SearchForm,
AutoForm, ReviewForm, NewLanguageForm,
UserManageForm,
)
from weblate.accounts.models import Profile, notify_new_language
from weblate.trans.views.helper import (
get_project, get_subproject, get_translation,
try_set_language,
)
import weblate
import datetime
from urllib import urlencode
def home(request):
"""
Home page of Weblate showing list of projects, stats
and user links if logged in.
"""
if 'show_set_password' in request.session:
messages.warning(
request,
_(
'You have activated your account, now you should set '
'the password to be able to login next time.'
)
)
return redirect('password')
wb_messages = WhiteboardMessage.objects.all()
projects = Project.objects.all_acl(request.user)
if projects.count() == 1:
projects = SubProject.objects.filter(
project=projects[0]
).select_related()
    # Warn about a username that has not been filled in (usually caused by
    # migration of users from an older system)
if not request.user.is_anonymous() and request.user.first_name == '':
messages.warning(
request,
_('Please set your full name in your profile.')
)
# Some stats
top_translations = Profile.objects.order_by('-translated')[:10]
top_suggestions = Profile.objects.order_by('-suggested')[:10]
last_changes = Change.objects.last_changes(request.user)[:10]
return render(
request,
'index.html',
{
'projects': projects,
'top_translations': top_translations.select_related('user'),
'top_suggestions': top_suggestions.select_related('user'),
'last_changes': last_changes,
'last_changes_rss': reverse('rss'),
'last_changes_url': '',
'search_form': SearchForm(),
'whiteboard_messages': wb_messages,
}
)
def search(request):
"""
Performs site-wide search on units.
"""
search_form = SearchForm(request.GET)
context = {
'search_form': search_form,
}
if search_form.is_valid():
units = Unit.objects.search(
None,
search_form.cleaned_data,
).select_related(
'translation',
)
# Filter results by ACL
acl_projects, filtered = Project.objects.get_acl_status(request.user)
if filtered:
units = units.filter(
translation__subproject__project__in=acl_projects
)
limit = request.GET.get('limit', 50)
page = request.GET.get('page', 1)
paginator = Paginator(units, limit)
try:
units = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
units = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of
# results.
units = paginator.page(paginator.num_pages)
context['page_obj'] = units
context['title'] = _('Search for %s') % (
search_form.cleaned_data['q']
)
context['query_string'] = search_form.urlencode()
context['search_query'] = search_form.cleaned_data['q']
else:
messages.error(request, _('Invalid search query!'))
return render(
request,
'search.html',
context
)
def show_engage(request, project, lang=None):
# Get project object, skipping ACL
obj = get_project(request, project, skip_acl=True)
# Handle language parameter
language = None
if lang is not None:
language = try_set_language(lang)
context = {
'object': obj,
'project': obj,
'languages': obj.get_language_count(),
'total': obj.get_total(),
'percent': obj.get_translated_percent(language),
'url': obj.get_absolute_url(),
'language': language,
}
# Render text
if language is None:
status_text = _(
'<a href="%(url)s">Translation project for %(project)s</a> '
'currently contains %(total)s strings for translation and is '
'<a href="%(url)s">being translated into %(languages)s languages'
'</a>. Overall, these translations are %(percent)s%% complete.'
)
else:
# Translators: line of text in engagement widget, please use your
# language name instead of English
status_text = _(
'<a href="%(url)s">Translation project for %(project)s</a> into '
'English currently contains %(total)s strings for translation and '
'is %(percent)s%% complete.'
)
if 'English' in status_text:
status_text = status_text.replace('English', language.name)
context['status_text'] = mark_safe(status_text % context)
return render(
request,
'engage.html',
context
)
def show_project(request, project):
obj = get_project(request, project)
dict_langs = Dictionary.objects.filter(
project=obj
).values_list(
'language', flat=True
).distinct()
dicts = []
for language in Language.objects.filter(id__in=dict_langs):
dicts.append(
{
'language': language,
'count': Dictionary.objects.filter(
language=language,
project=obj
).count(),
}
)
last_changes = Change.objects.prefetch().filter(
Q(translation__subproject__project=obj) |
Q(dictionary__project=obj)
)[:10]
return render(
request,
'project.html',
{
'object': obj,
'project': obj,
'dicts': dicts,
'last_changes': last_changes,
'last_changes_rss': reverse(
'rss-project',
kwargs={'project': obj.slug}
),
'last_changes_url': urlencode(
{'project': obj.slug}
),
'add_user_form': UserManageForm(),
}
)
def show_subproject(request, project, subproject):
obj = get_subproject(request, project, subproject)
last_changes = Change.objects.prefetch().filter(
translation__subproject=obj
)[:10]
new_lang_form = NewLanguageForm()
return render(
request,
'subproject.html',
{
'object': obj,
'project': obj.project,
'translations': obj.translation_set.enabled(),
'show_language': 1,
'last_changes': last_changes,
'last_changes_rss': reverse(
'rss-subproject',
kwargs={'subproject': obj.slug, 'project': obj.project.slug}
),
'last_changes_url': urlencode(
{'subproject': obj.slug, 'project': obj.project.slug}
),
'new_lang_form': new_lang_form,
}
)
def show_translation(request, project, subproject, lang):
obj = get_translation(request, project, subproject, lang)
last_changes = Change.objects.prefetch().filter(
translation=obj
)[:10]
# Check locks
obj.is_locked(request.user)
# Get form
form = get_upload_form(request)()
# Is user allowed to do automatic translation?
if request.user.has_perm('trans.automatic_translation'):
autoform = AutoForm(obj)
else:
autoform = None
# Search form for everybody
search_form = SearchForm()
# Review form for logged in users
if request.user.is_anonymous():
review_form = None
else:
review_form = ReviewForm(
initial={
'date': datetime.date.today() - datetime.timedelta(days=31)
}
)
return render(
request,
'translation.html',
{
'object': obj,
'project': obj.subproject.project,
'form': form,
'autoform': autoform,
'search_form': search_form,
'review_form': review_form,
'last_changes': last_changes,
'last_changes_url': urlencode(obj.get_kwargs()),
'last_changes_rss': reverse(
'rss-translation',
kwargs=obj.get_kwargs(),
),
'show_only_component': True,
'other_translations': Translation.objects.filter(
subproject__project=obj.subproject.project,
language=obj.language,
).exclude(
pk=obj.pk
),
}
)
def not_found(request):
"""
Error handler showing list of available projects.
"""
return render(
request,
'404.html',
{
'request_path': request.path,
'title': _('Page Not Found'),
},
status=404
)
def denied(request):
"""
Error handler showing list of available projects.
"""
return render(
request,
'403.html',
{
'request_path': request.path,
'title': _('Permission Denied'),
},
status=403
)
def server_error(request):
"""
Error handler for server errors.
"""
try:
return render(
request,
'500.html',
{
'request_path': request.path,
'title': _('Internal Server Error'),
},
status=500,
)
except Exception:
return django.views.defaults.server_error(request)
def about(request):
"""
Shows about page with version information.
"""
context = {}
totals = Profile.objects.aggregate(
Sum('translated'), Sum('suggested'), Count('id')
)
total_strings = 0
total_words = 0
for project in SubProject.objects.iterator():
try:
translation = project.translation_set.all()[0]
total_strings += translation.total
total_words += translation.total_words
except (IndexError, Translation.DoesNotExist):
pass
context['title'] = _('About Weblate')
context['total_translations'] = totals['translated__sum']
context['total_suggestions'] = totals['suggested__sum']
context['total_users'] = totals['id__count']
context['total_strings'] = total_strings
context['total_words'] = total_words
context['total_languages'] = Language.objects.filter(
translation__total__gt=0
).distinct().count()
context['total_checks'] = Check.objects.count()
context['ignored_checks'] = Check.objects.filter(ignore=True).count()
context['versions'] = get_versions() + get_optional_versions()
return render(
request,
'about.html',
context
)
def data_root(request):
return render(
request,
'data-root.html',
{
'hooks_docs': weblate.get_doc_url('api', 'hooks'),
'api_docs': weblate.get_doc_url('api', 'exports'),
'rss_docs': weblate.get_doc_url('api', 'rss'),
}
)
def data_project(request, project):
obj = get_project(request, project)
return render(
request,
'data.html',
{
'object': obj,
'project': obj,
'hooks_docs': weblate.get_doc_url('api', 'hooks'),
'api_docs': weblate.get_doc_url('api', 'exports'),
'rss_docs': weblate.get_doc_url('api', 'rss'),
}
)
@login_required
def new_language(request, project, subproject):
obj = get_subproject(request, project, subproject)
form = NewLanguageForm(request.POST)
if form.is_valid():
language = Language.objects.get(code=form.cleaned_data['lang'])
same_lang = obj.translation_set.filter(language=language)
if same_lang.exists():
messages.error(
request,
_('Chosen translation already exists in this project!')
)
elif obj.new_lang == 'contact':
notify_new_language(obj, language, request.user)
messages.success(
request,
_(
"A request for a new translation has been "
"sent to the project's maintainers."
)
)
elif obj.new_lang == 'add':
obj.add_new_language(language, request)
else:
messages.error(
request,
_(
'Please choose the language into which '
'you would like to translate.'
)
)
return redirect(
'subproject',
subproject=obj.slug,
project=obj.project.slug
)
| leohmoraes/weblate | weblate/trans/views/basic.py | Python | gpl-3.0 | 14,336 | 0 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlatPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('content', models.TextField(verbose_name='content', blank=True)),
('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
('template_name', models.CharField(
help_text=(
"Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use "
"'flatpages/default.html'."
), max_length=70, verbose_name='template name', blank=True
)),
('registration_required', models.BooleanField(
default=False, help_text='If this is checked, only logged-in users will be able to view the page.',
verbose_name='registration required'
)),
('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
],
options={
'ordering': ('url',),
'db_table': 'django_flatpage',
'verbose_name': 'flat page',
'verbose_name_plural': 'flat pages',
},
bases=(models.Model,),
),
]
| sametmax/Django--an-app-at-a-time | ignore_this_directory/django/contrib/flatpages/migrations/0001_initial.py | Python | mit | 1,710 | 0.004678 |
from __future__ import division
def humanize_bytes(n, precision=2):
# Author: Doug Latornell
# Licence: MIT
# URL: http://code.activestate.com/recipes/577081/
"""Return a humanized string representation of a number of bytes.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 B'
>>> humanize_bytes(1024, precision=1)
'1.0 kB'
>>> humanize_bytes(1024 * 123, precision=1)
'123.0 kB'
>>> humanize_bytes(1024 * 12342, precision=1)
'12.1 MB'
>>> humanize_bytes(1024 * 12342, precision=2)
'12.05 MB'
>>> humanize_bytes(1024 * 1234, precision=2)
'1.21 MB'
>>> humanize_bytes(1024 * 1234 * 1111, precision=2)
'1.31 GB'
>>> humanize_bytes(1024 * 1234 * 1111, precision=1)
'1.3 GB'
"""
abbrevs = [
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'B')
]
if n == 1:
return '1 B'
for factor, suffix in abbrevs:
if n >= factor:
break
# noinspection PyUnboundLocalVariable
return '%.*f %s' % (precision, n / factor, suffix)
| Irdroid/httpie | httpie/utils.py | Python | bsd-3-clause | 1,164 | 0.000859 |
#!/bin/env python
import os
import re
import sys
import time
import pickle
import random
import socket
import os.path
import traceback
import subprocess
from optparse import OptionParser
VERSION='1.1.0.7'
COLOR = {
'success' : '\33[2;32m', # Green
'fail' : '\033[2;31m', # Red
'bad' : '\033[31;07m', # Red Highlight
'warn' : '\033[3;43m', # Yellow Highlight
'normal' : '\033[0;39m', # Black
'note' : '\033[0;34m' # NOPEN Blue
}
class autoutils:
def __init__(self):
# Set the Colors
self.COLOR_SUCCESS = COLOR['success']
self.COLOR_FAILURE = COLOR['fail']
self.COLOR_BADFAILURE = COLOR['bad']
self.COLOR_WARNING = COLOR['warn']
self.COLOR_NORMAL = COLOR['normal']
self.COLOR_NOTE = COLOR['note']
# Set directories
self.opdir = '/current'
self.opup = '%s/up' % self.opdir
self.opbin = '%s/bin' % self.opdir
self.opetc = '%s/etc' % self.opdir
self.opdown = '%s/down' % self.opdir
self.optmp = '%s/tmp' % self.opdir
# Set Python module path
sys.path = [self.opetc,self.optmp] + sys.path
# must have this
if not os.environ.has_key('NOPEN_AUTOPORT'):
sys.stderr.write('Could not find NOPEN_AUTOPORT variable. ' +
'Must call from NOPEN -gs.\n')
sys.exit(1)
# Nopen ENV Variables
self.nopen_autoport = int(os.environ['NOPEN_AUTOPORT'])
self.nopen_serverinfo = os.environ['NOPEN_SERVERINFO']
self.nopen_clientver = os.environ['NOPEN_CLIENTVER']
self.nopen_mylog = os.environ['NOPEN_MYLOG']
self.nopen_rhostname = os.environ['NOPEN_RHOSTNAME']
self.nopen_nhome = os.environ['NHOME']
self.nopen_mypid = os.environ['NOPEN_MYPID']
self.optargetcommands = os.path.join(
self.opdown, '%s_targetcommands' % self.nopen_rhostname)
# This is the nopen autoport socket
self.connected = False
self.nopen_socket = None
self.nopen = None
self.pid = os.getpid()
self.hidden_dir = ''
self.status = {}
self.statusFile = os.path.join(self.optmp,
'%s.%s_pystatus' % (self.nopen_rhostname, self.nopen_mypid))
self.stateFile = os.path.join(self.optmp, '%s_pystate' % self.nopen_rhostname)
self.state = {
'linux': False,
'solaris': False,
'hpux': False,
'hpux_it': False
}
self.tunnel = None
self.perl_return = False
self.perl_sock_file = ''
return
#
# Saves self.state dictionary into a file
#
def saveState(self):
f = open(self.stateFile, 'wb')
pickle.dump(self.state, f)
f.close()
#
# Loads a previously saved state
#
def loadState(self):
if os.path.exists(self.stateFile):
f = open(self.stateFile, 'rb')
self.state = pickle.load(f)
f.close()
#
# Yea...
#
def help(self, word):
print ' ___ '
print ' |_____ | '
print ' || | | '
print ' || | | '
print ' ||O O| | Looks like you\'re trying to %s' % str(word).upper()
print ' || | | Want some help?'
print ' || U | | '
print ' || | || '
print ' || | || '
print ' ||| | || '
print ' ||| | || '
print ' ||| | || '
print ' ||| | || '
print ' |||__| || '
print ' ||_____|| '
print ' |_______| '
return
#
# Takes out any autoutils stuff and then calls the parser's
# parse_args() method.
# args should be an array without the program name (sys.argv[1:])
#
def parseArgs(self, parser, args, values=None):
if len(args) > 0:
if args[0].startswith('perl:'):
self.perl_return = True
self.perl_sock_file = sys.argv[1].split(':', 1)[1]
args = args[1:]
return parser.parse_args(args, values)
#
# Makes the connection to the NOPEN autoport.
# This takes care of the forking too.
#
def connect(self):
os.close(sys.stdout.fileno())
sys.stdout = sys.stderr
if not self.connected:
self.nopen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.nopen_socket.connect(('127.0.0.1', self.nopen_autoport))
self.nopen = self.nopen_socket.makefile()
self.connected = True
pid = os.fork()
if pid != 0:
self.nopen.close()
self.nopen_socket.close()
sys.exit(0)
self.pid = os.getpid()
self.nopen.write('#NOGS\n')
self.nopen.flush()
# going to run -status every time because something could change
# between runs and don't want to get caught with something bad.
self.parsestatus()
#if not os.path.exists(self.statusFile):
# self.parsestatus()
#else:
# f = open(self.statusFile, 'rb')
# self.status = pickle.load(f)
# f.close()
self.loadState()
self.saveState()
return self.nopen
#
# Does any final stuff with the output, like sending it to a calling
# perl script, then returns back a string of the argument, or unchanged
# if mkstr is False.
#
def finish(self, ret=None, mkstr=True):
if self.connected:
self.cleanup()
if not ret:
ret = ''
if mkstr:
if ret.__class__() == []:
ret_str = '\n'.join(ret) + '\n'
else:
ret_str = str(ret)
ret = ret_str
if self.perl_return:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM);
sock.connect((self.perl_sock_file))
sock.send(ret_str)
sock.close()
except:
print 'Could not connect to %s' % self.perl_sock_file
return ret
#
# Returns a list of any hidden directories found.
#
def getHidden(self, refresh=False):
tmpfile_chrs = '[A-Za-z0-9_-]' * 6
parent_dirs_old = [
'/var/tmp',
'/lib',
'/dev',
'/etc',
'/',
]
dirs_old = [
'.%s' % ('[A-Fa-f0-9]' * 16),
'.tmp%s' % tmpfile_chrs,
]
dir_regexs = [
'/var/spool/.cron%s.bak' % tmpfile_chrs,
'/var/log/.fsck%s.bak' % tmpfile_chrs,
'/lib/.security%s.lock' % tmpfile_chrs,
'/dev/.usb%s.lock' % tmpfile_chrs,
'/etc/.dev%s.save' % tmpfile_chrs,
'/var/tmp/.%s-unix' % tmpfile_chrs,
'/.opt%s.save' % tmpfile_chrs,
]
for pd in parent_dirs_old:
for d in dirs_old:
dir_regexs.append(os.path.join(pd, d))
parent_dirs = []
for dr in dir_regexs:
d = os.path.dirname(dr)
if not d in parent_dirs:
parent_dirs.append(d)
lsfile = os.path.join(self.opdown,
'stoichunt.%s' % self.nopen_rhostname)
if not os.path.exists(lsfile):
refresh = True
if refresh:
self.preserveFiles(lsfile)
output, nopenlines, outputlines = self.doit(
'-ls %s > T:%s' % (' '.join(parent_dirs), lsfile))
else:
outputlines = file_readlines(lsfile)
files = [x.strip('\n').split(None, 9)[-1] for x in outputlines]
dirs = []
for f in files:
for r in dir_regexs:
if re.match(r, f):
dirs.append((f, r))
if not refresh:
# do a listing of the specific dir's regex to confirm it's there,
# only if it wasn't just done
tolist = ' '.join([x[1] for x in dirs])
if tolist:
output, nopenlines, outputlines = self.doit('-ls -d %s' % tolist)
dirs = [x.strip('\n').split(None, 9)[-1] for x in outputlines]
else:
dirs = [x[0] for x in dirs]
return dirs
#
# Ask a question prompt and return the input.
#
def getInput(self, prompt, default=None, color=COLOR['fail']):
if not prompt:
return None
if default:
print '%s%s [%s]:%s' % (color, prompt, default, self.COLOR_NORMAL),
else:
print '%s%s:%s' % (color, prompt, self.COLOR_NORMAL),
sys.stdout.flush()
answer = raw_input().strip()
if (not answer or answer == '') and default:
return default
return answer
#
# pause and wait for a <Return>.
#
def pause(self):
print
print '%sHit <Return> to continue.%s' % (COLOR['warn'],COLOR['normal'])
sys.stdout.flush()
answer = raw_input().strip()
return
#
    # Not sure what this is supposed to do yet
#
def callPerl(self, cmd):
self.nopen.write("%s\n" % cmd)
self.nopen.flush()
return
#
# Return the current working directory.
#
def getcwd(self):
return self.status['targetcwd']
#
# Parses the output of -status and sets the values dictionary.
#
def parsestatus(self):
local = True
values = { 'clientver': '',
'histfile': '',
'cmdoutfile': '',
'localcwd': '',
'nhome': '',
'localpid': '',
'serverver': '',
'wdir': '',
'targetos': '',
'targetcwd': '',
'targetpid': '',
'targetppid': '',
'targetport': '' }
re_status = re.compile('(?P<name>.+[^\s]{1,})\s{2,}(?P<value>.+$)')
output, nopenlines, lines = self.doit('-status')
for line in lines:
line = line.strip()
if line == '[-status]' or not line: continue
if line == 'Local' or line == 'Connection': continue
if line == 'Remote':
local = False
continue
match = re_status.search(line)
if not match:
continue
name = match.group('name')
value = match.group('value')
if name == 'NOPEN client':
values['clientver'] = value
elif name == 'History':
values['histfile'] = value
elif name == 'Command Out':
values['cmdoutfile'] = value
elif name == 'CWD' and local is True:
values['localcwd'] = value
elif name == 'CWD' and local is False:
values['targetcwd'] = value
elif name == 'NHOME':
values['nhome'] = value
elif name == 'PID (PPID)' and local is True:
values['localpid'] = value
elif name.startswith('Remote'):
port = value[value.rfind(':')+1:value.rfind('\)')]
values['targetport'] = port
elif name == 'PID (PPID)' and local is False:
pid,ppid = value.split(' ')
values['targetpid'] = pid[1:-1]
values['targetppid'] = ppid[1:-2]
elif name == 'OS':
values['targetos'] = value
if value.find('SunOS') != -1:
self.state['solaris'] = True
elif value.find('Linux') != -1:
self.state['linux'] = True
elif value.find('HP-UX') != -1:
self.state['hpux'] = True
if value.find('ia64') != -1:
self.state['hpux_it'] = True
elif name == 'NOPEN server':
values['serverver'] = value
elif name == 'WDIR':
values['wdir'] = value
self.status = values
f = open(self.statusFile, 'wb')
pickle.dump(self.status, f)
f.close()
return
#
# Prints output to the NOPEN window by writing to a local
# file and "-lsh cat" that file.
#
def doprint(self, *msgs):
whatout = os.path.join(self.optmp, '.whatout.%d' % self.pid)
fd = open(whatout, 'w')
for m in msgs:
if m.__class__() == []:
for m2 in m:
fd.write(m2)
else:
fd.write(m)
fd.write('%s\n' % self.COLOR_NORMAL)
fd.close()
self.doit('-lsh cat %s' % whatout)
return
#
# Runs "-lsh echo" with the string. Should probably use self.doprint()
# instead to avoid dealing with escaping issues.
#
def dolocalecho(self, cmd):
self.nopen.write('-lsh echo "%s"\n' % cmd)
self.nopen.flush()
return
#
# Runs a command through the NOPEN autoport.
# Returns a 3-tuple of two strings and a list: (output, nopenoutput, outputlines)
# output - string of non-nopen lines
# nopenoutput - string of nopen lines
# outputlines - list of non-nopen lines
#
def doit(self, cmd):
if not cmd.startswith('-put') and not cmd.startswith('mkdir'):
cmd = '%s -nohist' % cmd
first = True
self.nopen.write('%s\n' % cmd)
self.nopen.flush()
nopenlines = []
outputlines = []
re_nopenline = re.compile('^\[.*\]$')
while self.nopen:
try:
line = self.nopen.readline().strip('\n\r')
except socket.error:
continue
if line.startswith('NO! '):
nopenlines.append(line)
break
elif re_nopenline.match(line):
nopenlines.append(line)
else:
outputlines.append(line)
return ('\n'.join(outputlines), '\n'.join(nopenlines), outputlines)
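    # Minimal usage sketch (the command string below is only an example):
    #   out, nopen_out, lines = self.doit('-ls /var/tmp')
    # `out` is the joined non-NOPEN output, `nopen_out` holds the NOPEN status
    # lines (those wrapped in [...] or starting with "NO! "), and `lines` is
    # the same non-NOPEN output as a list of strings.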
#
# Runs the command and writes the output to a local file in opdown
# (for builtins) or optargetcommands. Force to optargetcommands
# with tgtcmd (execept builtins will ignore this).
# Returns a 2-tuple of the output content and the output file name:
# (output, outfilename)
#
def doitwrite(self, cmd):
(out, nout, outlines) = self.doit(cmd)
# find the actual command in the nopen output and use that for
# creating a file name (unless it was redirected)
realcmd = None
noutlines = nout.split('\n')
for line in noutlines:
if re.search('Saving +output to', line) or \
re.search('^\[.*\]\[.* -. .*\]$', line):
continue
else:
r = re.search('\[(.*)\]', line)
if r != None:
realcmd = r.group(1)
break
if not realcmd:
realcmd = cmd
# cleanup the command string
tmpcmd = realcmd.strip()
tmpcmd = tmpcmd.replace('-nohist', '')
r = re.search('^(.*)\s*[\.\>]+\s*(L:|T:).*', tmpcmd)
if r != None:
tmpcmd = r.group(1).strip()
if tmpcmd[0] == '\\':
tmpcmd = tmpcmd[1:]
tmpcmd = re.sub('[\*\!\s/\\\$\&>\|]', '_', tmpcmd)
tmpcmd = re.sub(';', '+', tmpcmd)
if tmpcmd[0] in ['-', '=']:
filename = os.path.join(
self.opdown, '%s__%s' % (tmpcmd[1:], self.nopen_rhostname))
elif realcmd.find('|') >= 0:
filename = os.path.join(
self.opdown, '%s__%s' % (tmpcmd, self.nopen_rhostname))
else:
filename = os.path.join(self.optargetcommands, tmpcmd)
# truncate long files
filename = filename[:2000]
filename = '%s__%s' % (filename, timestamp())
# just to be safe
self.preserveFiles(filename)
fd = open(filename, 'w')
fd.write('# %s\n\n' % cmd)
fd.write(out)
fd.write('\n')
fd.close()
return (out, filename)
#
# Takes an existing file and renames it to "filename_###".
# files can be a single filename or list of filenames.
#
def preserveFiles(self, files, loud=False):
retarr = []
if files.__class__() == '':
files = [files]
for f in files:
if os.path.exists(f):
ext_num = 0
while os.path.exists('%s_%04d' % (f, ext_num)):
ext_num += 1
newname = '%s_%04d' % (f, ext_num)
if loud:
print '\n%s%s: File exists, renaming to %s%s\n' % \
(self.COLOR_WARNING, f, newname, self.COLOR_NORMAL)
os.rename(f, newname)
retarr.append(newname)
return retarr
#
# Not sure if used yet...
#
def startTunnel(self, autoport=None):
from control import control
import random
if not autoport:
autoport = random.randint(20000,60000)
self.callPerl("-tunnel %d tcp autoclose" % autoport)
self.tunnel = control(int(autoport))
self.tunnel.main()
#
# Not sure if used yet...
#
def stopTunnel(self):
if self.tunnel:
self.tunnel.s.send("c 1 2 3 4 5 6 7 8 9\n")
time.sleep(1)
self.tunnel.finish()
#
# Cleans up. Right now just closes the autoport socket.
#
def cleanup(self):
self.nopen_socket.close()
self.connected = False
self.nopen = None
return
#
# Returns 2-tuple (rx_MB, tx_MB), both floats.
#
def bwsofar(self):
bwfile = os.path.join(self.opdown, 'bwmonitor.txt')
if os.path.exists(bwfile):
tx_re = None
rx_re = None
# try twice in case tail -2 fails for some reason
for i in range(0, 2):
tail = execute('tail -2 %s' % bwfile)
lines = tail.strip().split('\n')
if len(lines) != 2:
continue
tx_re = re.search('^\s*TX\s+(\d+)', lines[0])
rx_re = re.search('^\s*RX\s+(\d+)', lines[1])
if tx_re != None and rx_re != None:
break
if tx_re == None or rx_re == None:
return (0, 0)
return (int(rx_re.group(1)) / 1048576.0, int(tx_re.group(1)) / 1048576.0)
else:
# TODO: go through pcaps dir and calculate size?
return (0, 0)
#
# Pops a window with custom text
# - pillaged largely from doprint()
#
def textpopup(self, xargs, *msgs):
whatout = os.path.join(self.optmp, '.whatout.%d.%d' % \
(self.pid, random.randint(1,10000)))
fd = open(whatout, 'w')
for m in msgs:
if m.__class__() == []:
for m2 in m:
fd.write(m2)
else:
fd.write(m)
fd.close()
if xargs == None:
xargs = '-geometry 88x58 -bg white -fg blue'
self.filepopup(whatout, xargs)
return
#
# Pops an xterm with a provided file and geometry
# - pillaged largely from execute()
#
def filepopup(self, file, xargs='-geometry 88x58 -bg white -fg blue'):
if not os.path.exists(file):
self.doprint(COLOR['fail'], 'Error: the file %s doesn\'t exist.' % file)
else:
pid = os.fork()
if pid == 0:
cmd = 'xterm %s -e view %s' % (xargs,file)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
os._exit(0)
return
#
# Returns a name for a temporary file using the convention of
# prefix and NOPEN pid, ensuring the file doesn't already exist.
# Input is a directory and prefix concatenated, such as "/var/tmp/d"
# or "/dev/shm/.tmp".
#
def tmpfilename(self, directoryprefix):
number = int(self.nopen_mypid)
output = "dummy"
while output:
output, nopenoutput, outputlines = self.doit("-lt %s.%d" % (directoryprefix, number))
number += 1
number -= 1
return "%s.%d" % (directoryprefix, number)
###############################################################################
#
# Use this class when doing option parsing.
#
# By default, the epilog (to print additional help info) will strip off
# newlines, so this class overrides it and returns it as-is.
#
class OptParser(OptionParser):
#
# Don't do any formatting on the epilog.
#
def format_epilog(self, formatter):
return self.epilog
###############################################################################
# Misc methods
###############################################################################
#
# Locally execute a command and return the stdout output,
# since os.system() does not return output.
#
def execute(cmd, indata=None):
stdin = None
if indata:
stdin = subprocess.PIPE
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=stdin,
stderr=subprocess.PIPE, shell=True)
if indata:
proc.stdin.write(indata)
proc.stdin.close()
output = proc.stdout.read()
return output
#
# Split list l into n sized chunks.
#
def chunks(l, n):
return list(chunks_h(l, n))
def chunks_h(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
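# Usage sketch (added for clarity):
#   chunks([1, 2, 3, 4, 5], 2)  ->  [[1, 2], [3, 4], [5]]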
#
# Returns readlines() output from a file
#
def file_readlines(filename):
try:
fd = open(filename, 'r')
lines = fd.readlines()
fd.close()
return lines
except:
return []
#
# Returns timestamp in format of YYYYMMDD-HHMMSS
#
def timestamp():
return time.strftime('%Y%m%d-%H%M%S', time.gmtime())
if __name__ == "__main__":
print "Not to be called directly..."
sys.exit(1)
| DarthMaulware/EquationGroupLeaks | Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/etc/autoutils.py | Python | unlicense | 22,459 | 0.005699 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MetricsAdvisorConfiguration
from .operations import MetricsAdvisorOperationsMixin
from .. import models
class MetricsAdvisor(MetricsAdvisorOperationsMixin):
"""Microsoft Azure Metrics Advisor REST API (OpenAPI v2).
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://:code:`<resource-name>`.cognitiveservices.azure.com).
:type endpoint: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{endpoint}/metricsadvisor/v1.0'
self._config = MetricsAdvisorConfiguration(credential, endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MetricsAdvisor":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/aio/_metrics_advisor.py | Python | mit | 3,345 | 0.003886 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
from airflow.providers.jdbc.operators.jdbc import JdbcOperator
class TestJdbcOperator(unittest.TestCase):
def setUp(self):
self.kwargs = dict(
sql='sql',
task_id='test_jdbc_operator',
dag=None
)
@patch('airflow.providers.jdbc.operators.jdbc.JdbcHook')
def test_execute(self, mock_jdbc_hook):
jdbc_operator = JdbcOperator(**self.kwargs)
jdbc_operator.execute(context={})
mock_jdbc_hook.assert_called_once_with(jdbc_conn_id=jdbc_operator.jdbc_conn_id)
mock_jdbc_hook.return_value.run.assert_called_once_with(
jdbc_operator.sql, jdbc_operator.autocommit, parameters=jdbc_operator.parameters)
| wileeam/airflow | tests/providers/jdbc/operators/test_jdbc.py | Python | apache-2.0 | 1,539 | 0.0013 |
from spec.python import db_connection
from sam import constants
from sam import common
from sam import integrity
import traceback
mysql_params = constants.dbconfig.copy()
sqlite_params = constants.dbconfig.copy()
mysql_params['dbn'] = 'mysql'
mysql_params['db'] = 'samapper_test'
sqlite_params['dbn'] = 'sqlite'
sqlite_params['db'] = '/tmp/sam_test.db'
db_mysql, _ = common.get_db(mysql_params)
db_sqlite, _2 = common.get_db(sqlite_params)
def test_mysql_access():
print(mysql_params)
assert integrity.check_and_fix_db_access_MySQL(mysql_params) == 0
def test_sqlite_access():
assert integrity.check_and_fix_db_access_SQLite(sqlite_params) == 0
def test_mysql_shared_tables():
try:
errors = integrity.check_shared_tables(db_mysql)
integrity.fix_shared_tables(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_shared_tables():
try:
errors = integrity.check_shared_tables(db_sqlite)
integrity.fix_shared_tables(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_UDF():
integrity.fix_UDF_MySQL(db_mysql)
rows = db_mysql.query('SELECT decodeIP(1234567890)')
assert rows.first().values()[0] == '73.150.2.210'
rows = db_mysql.query('SELECT encodeIP(12,34,56,78)')
assert rows.first().values()[0] == 203569230L
def test_sqlite_UDF():
integrity.fix_UDF_SQLite(db_sqlite)
rows = db_sqlite.query('SELECT decodeIP(1234567890)')
assert rows.first().values()[0] == '73.150.2.210'
rows = db_sqlite.query('SELECT encodeIP(12,34,56,78)')
assert rows.first().values()[0] == 203569230L
def test_mysql_def_subscription():
try:
errors = integrity.check_default_subscription(db_mysql)
integrity.fix_default_subscription(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_def_subscription():
try:
errors = integrity.check_default_subscription(db_sqlite)
integrity.fix_default_subscription(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_subscriptions():
try:
        errors = integrity.check_subscriptions(db_mysql)
        integrity.fix_subscriptions(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_subscriptions():
try:
errors = integrity.check_subscriptions(db_sqlite)
integrity.fix_subscriptions(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_settings():
try:
errors = integrity.check_settings(db_mysql)
integrity.fix_settings(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_settings():
try:
errors = integrity.check_settings(db_sqlite)
integrity.fix_settings(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_datasources():
try:
errors = integrity.check_data_sources(db_mysql)
integrity.fix_data_sources(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_datasources():
try:
errors = integrity.check_data_sources(db_sqlite)
integrity.fix_data_sources(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_session():
try:
errors = integrity.check_sessions_table(db_mysql)
integrity.fix_sessions_table(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_session():
try:
errors = integrity.check_sessions_table(db_sqlite)
integrity.fix_sessions_table(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_check_and_fix_integrity_mysql():
mysqlconfig = {
'dbn': "mysql",
'db': 'sam_integrity_test',
'host': "localhost",
'user': "root",
'pw': constants.dbconfig['pw'],
'port': 3306
}
db, dbq = common.get_db(mysqlconfig)
try:
integrity.check_and_fix_integrity(db, mysqlconfig)
finally:
db.query("drop database sam_integrity_test")
def test_check_and_fix_integrity_sqlite():
sqliteconfig = {
'dbn': "sqlite",
'db': '',
'host': "localhost",
'user': "root",
'pw': constants.dbconfig['pw'],
'port': 3306
}
db, dbq = common.get_db(sqliteconfig)
integrity.check_and_fix_integrity(db, sqliteconfig)
| JoePelz/SAM | spec/python/test_integrity.py | Python | gpl-3.0 | 4,602 | 0.002825 |
from pysv import ast_utils
from pysv import ssa_converter
from pysv import utils
from pysv import loops
from pysv import interm
from pysv.smt2 import ProgramSmt2
def get_code_in_smt2(code, code_pre, code_post, program_vars, env, holes_decls = None):
"""Converts source codes of specification elements into program in SMT-LIB 2.0 language.
:param code: (str) Source code (in arbitrary language) of the program.
:param code_pre: (str) Source code (in arbitrary language) of the expression representing all *pre-conditions*.
:param code_post: (str) Source code (in arbitrary language) of the expression representing all *post-conditions*.
:param program_vars: (ProgramVars) Information about variables and their types.
:param env: (Options) Options of the currently realized task.
:param holes_decls: (list[HoleDecl]) Declarations of all holes in the case of synthesis scenario.
:return: (ProgramSmt2) Program in the SMT-LIB 2.0 language.
"""
if env.lang == utils.Options.PYTHON:
return convert_py_to_smt2(code, code_pre, code_post, program_vars, env, holes_decls)
elif env.lang == utils.Options.SMT2:
code = processHoles(code, holes_decls)
main = ProgramSmt2([code]) # this works because SMT lib is a functional language
return main, ProgramSmt2([code_pre]), ProgramSmt2([code_post])
else:
raise Exception(str(env.lang) + ": unsupported language!")
def processHoles(code, holes_decls):
"""Finds all hole symbols in the SMT-LIB code of the program and replaces them with
appropriate references to their synthesis functions. Does nothing in case of
verification.
:param code: (str) Source code (in arbitrary language) of the program.
:param holes_decls: (list[HoleDecl]) Declarations of all holes.
:return: (str) Source code with SMT replacement of holes by appropriate functions.
"""
if holes_decls is None or len(holes_decls) == 0:
return code
else:
code = code.replace(")", " )")
for h in holes_decls:
if h.id in code:
code = code.replace(h.id+" ", h.get_function_call()+" ")
code = code.replace(" )", ")")
return code
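# Illustrative sketch (the hole id and generated call are hypothetical): if a
# hole declaration has id 'H0' and its get_function_call() returns '(H0 x y)',
# then processHoles("(+ x H0)", holes) yields "(+ x (H0 x y))"; the temporary
# ")" -> " )" expansion only ensures every hole id is followed by a space.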
def convert_py_to_smt2(code, code_pre, code_post, program_vars, env, holes_decls):
# Python source code --> internal abstract program representation.
ib, pre, post = ast_utils.process_source_code(code, code_pre, code_post, holes_decls)
utils.logger.debug('\n\n******** PROGRAM REPR ********:\n' + str(ib))
# Handling of loops
ib = interm.ProgramInterm(loops.unroll_loops(ib.src, n=env.loop_unrolling_level))
# Abstract program representation --> abstract program representation in SSA form.
if env.ssa_enabled:
ib, post = ssa_converter.convert(ib, post, program_vars)
program_vars.add_marked_variables(ib.src.collect_variables()) # Updating variable list
# Producing SMT-LIB code for program's elements.
ib_smt2 = ib.to_smt2(env)
pre_smt2 = pre.to_smt2(env)
post_smt2 = post.to_smt2(env)
return ib_smt2, pre_smt2, post_smt2
def write_script_to_file(script, env):
if env.save_script_to_file:
with open('script.smt2', 'w') as file_:
file_.write(script) | iwob/pysv | pysv/smt_common.py | Python | mit | 3,263 | 0.005823 |
# -*- encoding: utf-8 -*-
from .cursebox import Cursebox
from .colors import colors
from .constants import EVENT_SKIP
from .utils import hex_to_rgb
logo = [u" █ ",
u"█▀█ █ █ █▀█ █▀▀ █▀█ █▀▄ █▀█ █▄█",
u"█ █ █ █ ▀▀█ █▄█ █ █ █ █ ▄█▄",
u"█▄█ █▄█ █ ▄▄█ █▄▄ █▄█ █▄█ █ █"]
grey = colors.from_rgb((127, 127, 127))
rainbow = ["ffffff", "ffaaaa", "ff5555", "ff0000",
"ff6d00", "ffda00", "b6ff00", "48ff00",
"00ff24", "00ff91", "00ffff", "0091ff",
"0024ff", "4800ff", "b600ff", "ff00da",
"ff006d", "ff0000", "ff5555", "ffaaaa"]
prompt = "cursebox v1.0 - Press any key to exit"
def demo():
l_width, l_height = len(logo[0]), len(logo)
x_s = 0.4
palette = [colors.from_rgb(hex_to_rgb(hex)) for hex in rainbow]
padding = [colors.white] * (int(x_s * l_width) + 3)
palette = padding + palette + padding
with Cursebox(blocking_events=False) as cb:
width, height = cb.width, cb.height
def draw_logo(t):
for y0, line in enumerate(logo):
y1 = (height - l_height) / 2 + y0
for x0, char in enumerate(line):
x1 = x0 + (width - l_width) / 2
offset = int(t + y0 + x_s * x0) % len(palette)
cb.put(x=x1, y=y1, text=char,
fg=palette[offset],
bg=colors.transparent)
t = 0
l = 100
cb.put(x=(width - len(prompt)) / 2,
y=(height + l_height) / 2 + 1,
text=prompt, fg=grey, bg=colors.transparent)
while cb.poll_event() == EVENT_SKIP:
draw_logo(t if t < len(palette) else 0)
t += 1
if t > l + len(palette):
t = 0
if __name__ == "__main__":
demo()
| Tenchi2xh/cursebox | cursebox/__main__.py | Python | mit | 1,969 | 0.000542 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Intensity-based image registration
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import scipy.ndimage as nd
from nibabel import Nifti1Image
from .optimizer import configure_optimizer
from .affine import inverse_affine, subgrid_affine, affine_transforms
from .chain_transform import ChainTransform
from .similarity_measures import similarity_measures as builtin_simi
from ._register import _joint_histogram
MAX_INT = np.iinfo(np.intp).max
# Module globals
VERBOSE = os.environ.get('NIREG_DEBUG_PRINT', False) # enables online print statements
CLAMP_DTYPE = 'short' # do not edit
NPOINTS = 64 ** 3
# Dictionary of interpolation methods (partial volume, trilinear,
# random)
interp_methods = {'pv': 0, 'tri': 1, 'rand': -1}
def unpack(val, numtype):
try:
tmp = numtype(val)
out = (tmp, tmp)
except:
out = (numtype(val[0]), numtype(val[1]))
return out
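# Usage sketch (added for clarity): unpack(256, int) returns (256, 256), while
# unpack((64, 128), int) returns (64, 128); the bare except falls back to
# treating the value as a two-element sequence.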
class HistogramRegistration(object):
"""
A class to reprensent a generic intensity-based image registration
algorithm.
"""
def __init__(self, from_img, to_img,
from_mask=None,
to_mask=None,
bins=256,
spacing=None,
similarity='crl1',
interp='pv',
sigma=0,
renormalize=False,
dist=None):
"""Creates a new histogram registration object.
Parameters
----------
from_img : nibabel image
`From` image
to_img : nibabel image
`To` image
from_mask : array-like
Mask to apply to the `from` image
to_mask : array-like
Mask to apply to the `to` image
bins : integer or sequence
Number of histogram bins to represent the `from` and `to`
            image, respectively. If a single integer, the same binning is applied
to both images.
spacing : None or sequence
A sequence of three integers representing the subsampling
factors applied to the `from` image grid for faster
similarity computation. If None, the spacing is set
automatically so as to trade off between registration
accuracy and computation time.
similarity : str or callable
Cost-function for assessing image similarity. If a string,
one of 'cc': correlation coefficient, 'cr': correlation
ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual
information, 'nmi': normalized mutual information, 'slr':
supervised log-likelihood ratio. If a callable, it should
take a two-dimensional array representing the image joint
histogram as an input and return a float.
dist: None or array-like
Joint intensity probability distribution model for use with the
'slr' measure. Should be of shape (from_bins, to_bins).
interp : str
Interpolation method. One of 'pv': Partial volume, 'tri':
Trilinear, 'rand': Random interpolation. See ``joint_histogram.c``
sigma : float or sequence
Standard deviation(s) in millimeters of isotropic Gaussian
kernels used to smooth the `from` and `to` images,
respectively. If float, the same kernel size is applied to
both images. If 0, no smoothing is applied.
"""
# Binning sizes
from_bins, to_bins = unpack(bins, int)
# Smoothing kernel sizes
self._from_sigma, self._to_sigma = unpack(sigma, float)
# Clamping of the `from` image. The number of bins may be
        # overridden if unnecessarily large.
data, from_bins_adjusted = clamp(from_img,
from_bins,
mask=from_mask,
sigma=self._from_sigma)
if not similarity == 'slr':
from_bins = from_bins_adjusted
self._from_img = Nifti1Image(data, from_img.get_affine())
# Set field of view in the `from` image with potential
# subsampling for faster similarity evaluation. This also sets
# the _from_data and _vox_coords attributes
        if spacing is None:
npoints = NPOINTS
else:
npoints = None
        if from_mask is None:
corner, size = (0, 0, 0), None
else:
corner, size = smallest_bounding_box(from_mask)
self.set_fov(spacing=spacing, corner=corner, size=size,
npoints=npoints)
# Clamping of the `to` image including padding with -1
data, to_bins_adjusted = clamp(to_img,
to_bins,
mask=to_mask,
sigma=self._to_sigma)
if not similarity == 'slr':
to_bins = to_bins_adjusted
self._to_data = -np.ones(np.array(to_img.shape) + 2, dtype=CLAMP_DTYPE)
self._to_data[1:-1, 1:-1, 1:-1] = data
self._to_inv_affine = inverse_affine(to_img.get_affine())
# Joint histogram: must be double contiguous as it will be
# passed to C routines which assume so
self._joint_hist = np.zeros([from_bins, to_bins], dtype='double')
# Set default registration parameters
self._set_interp(interp)
self._set_similarity(similarity, renormalize, dist=dist)
def _get_interp(self):
return list(interp_methods.keys())[\
list(interp_methods.values()).index(self._interp)]
def _set_interp(self, interp):
self._interp = interp_methods[interp]
interp = property(_get_interp, _set_interp)
def set_fov(self, spacing=None, corner=(0, 0, 0), size=None,
npoints=None):
"""
Defines a subset of the `from` image to restrict joint
histogram computation.
Parameters
----------
spacing : sequence (3,) of positive integers
Subsampling of image in voxels, where None (default) results
in the subsampling to be automatically adjusted to roughly
match a cubic grid with `npoints` voxels
corner : sequence (3,) of positive integers
Bounding box origin in voxel coordinates
size : sequence (3,) of positive integers
Desired bounding box size
npoints : positive integer
Desired number of voxels in the bounding box. If a `spacing`
argument is provided, then `npoints` is ignored.
"""
if spacing is None and npoints is None:
spacing = [1, 1, 1]
if size is None:
size = self._from_img.shape
slicer = lambda c, s, sp:\
tuple([slice(c[i], s[i] + c[i], sp[i]) for i in range(3)])
# Adjust spacing to match desired field of view size
if spacing is not None:
fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]
else:
fov_data = self._from_img.get_data()[slicer(corner, size, [1, 1, 1])]
spacing = ideal_spacing(fov_data, npoints=npoints)
fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]
self._from_data = fov_data
self._from_npoints = (fov_data >= 0).sum()
self._from_affine = subgrid_affine(self._from_img.get_affine(),
slicer(corner, size, spacing))
# We cache the voxel coordinates of the clamped image
self._from_spacing = spacing
self._vox_coords =\
np.indices(self._from_data.shape).transpose((1, 2, 3, 0))
def _set_similarity(self, similarity, renormalize=False, dist=None):
if similarity in builtin_simi:
if similarity == 'slr':
if dist is None:
raise ValueError('slr measure requires a joint intensity distribution model, '
'see `dist` argument of HistogramRegistration')
if dist.shape != self._joint_hist.shape:
raise ValueError('Wrong shape for the `dist` argument')
self._similarity = similarity
self._similarity_call =\
builtin_simi[similarity](self._joint_hist.shape,
self._from_npoints,
renormalize=renormalize,
dist=dist)
else:
if not hasattr(similarity, '__call__'):
raise ValueError('similarity should be callable')
self._similarity = 'custom'
self._similarity_call = similarity
def _get_similarity(self):
return self._similarity
similarity = property(_get_similarity, _set_similarity)
def eval(self, T):
"""
Evaluate similarity function given a world-to-world transform.
Parameters
----------
T : Transform
Transform object implementing ``apply`` method
"""
Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine)
return self._eval(Tv)
def eval_gradient(self, T, epsilon=1e-1):
"""
Evaluate the gradient of the similarity function wrt
transformation parameters.
The gradient is approximated using central finite differences
at the transformation specified by `T`. The input
transformation object `T` is modified in place unless it has a
``copy`` method.
Parameters
----------
T : Transform
Transform object implementing ``apply`` method
epsilon : float
Step size for finite differences in units of the
transformation parameters
Returns
-------
g : ndarray
Similarity gradient estimate
"""
param0 = T.param.copy()
if hasattr(T, 'copy'):
T = T.copy()
def simi(param):
T.param = param
return self.eval(T)
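        # Sketch of the approximation (approx_gradient is assumed to implement
        # the standard central-difference scheme): for each parameter i,
        #   g[i] ~ (f(p + eps * e_i) - f(p - eps * e_i)) / (2 * eps)
        # where e_i is the i-th unit vector and eps is the `epsilon` argument.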
return approx_gradient(simi, param0, epsilon)
def eval_hessian(self, T, epsilon=1e-1, diag=False):
"""
Evaluate the Hessian of the similarity function wrt
transformation parameters.
The Hessian or its diagonal is approximated at the
transformation specified by `T` using central finite
differences. The input transformation object `T` is modified
in place unless it has a ``copy`` method.
Parameters
----------
T : Transform
Transform object implementing ``apply`` method
epsilon : float
Step size for finite differences in units of the
transformation parameters
diag : bool
If True, approximate the Hessian by a diagonal matrix.
Returns
-------
H : ndarray
Similarity Hessian matrix estimate
"""
param0 = T.param.copy()
if hasattr(T, 'copy'):
T = T.copy()
def simi(param):
T.param = param
return self.eval(T)
if diag:
return np.diag(approx_hessian_diag(simi, param0, epsilon))
else:
return approx_hessian(simi, param0, epsilon)
def _eval(self, Tv):
"""
Evaluate similarity function given a voxel-to-voxel transform.
Parameters
----------
Tv : Transform
Transform object implementing ``apply`` method
Should map voxel space to voxel space
"""
        # trans_vox_coords needs to be C-contiguous
trans_vox_coords = Tv.apply(self._vox_coords)
interp = self._interp
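        # A negative interpolation flag requests stochastic resampling: draw a
        # fresh negative random integer so each evaluation uses a new seed.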
if self._interp < 0:
interp = - np.random.randint(MAX_INT)
_joint_histogram(self._joint_hist,
self._from_data.flat, # array iterator
self._to_data,
trans_vox_coords,
interp)
# Make sure all joint histogram entries are non-negative
np.maximum(self._joint_hist, 0, self._joint_hist)
return self._similarity_call(self._joint_hist)
def optimize(self, T, optimizer='powell', xtol=1e-2, ftol=1e-2, gtol=1e-3,
maxiter=25, maxfun=None, **kwargs):
""" Optimize transform `T` with respect to similarity measure.
The input object `T` will change as a result of the optimization.
Parameters
----------
T : object or str
An object representing a transformation that should
implement ``apply`` method and ``param`` attribute or
property. If a string, one of 'rigid', 'similarity', or
'affine'. The corresponding transformation class is then
initialized by default.
optimizer : str
Name of optimization function (one of 'powell', 'steepest',
'cg', 'bfgs', 'simplex')
**kwargs : dict
keyword arguments to pass to optimizer
Returns
-------
T : object
Locally optimal transformation
"""
# Replace T if a string is passed
if T in affine_transforms:
T = affine_transforms[T]()
# Pull callback out of keyword arguments, if present
callback = kwargs.pop('callback', None)
# Create transform chain object with T generating params
Tv = ChainTransform(T, pre=self._from_affine, post=self._to_inv_affine)
tc0 = Tv.param
# Cost function to minimize
def cost(tc):
            # This is where the similarity function is calculated
Tv.param = tc
return -self._eval(Tv)
# Callback during optimization
if callback is None and VERBOSE:
def callback(tc):
Tv.param = tc
print(Tv.optimizable)
print(str(self.similarity) + ' = %s' % self._eval(Tv))
print('')
# Switching to the appropriate optimizer
if VERBOSE:
print('Initial guess...')
print(Tv.optimizable)
kwargs.setdefault('xtol', xtol)
kwargs.setdefault('ftol', ftol)
kwargs.setdefault('gtol', gtol)
kwargs.setdefault('maxiter', maxiter)
kwargs.setdefault('maxfun', maxfun)
fmin, args, kwargs = configure_optimizer(optimizer,
fprime=None,
fhess=None,
**kwargs)
# Output
if VERBOSE:
print('Optimizing using %s' % fmin.__name__)
kwargs['callback'] = callback
Tv.param = fmin(cost, tc0, *args, **kwargs)
return Tv.optimizable
def explore(self, T, *args):
"""
Evaluate the similarity at the transformations specified by
sequences of parameter values.
For instance:
s, p = explore(T, (0, [-1,0,1]), (4, [-2.,2]))
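        evaluates the similarity at the six transformations obtained by
        combining offsets [-1, 0, 1] on parameter 0 with offsets [-2., 2.]
        on parameter 4.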
Parameters
----------
T : object
Transformation around which the similarity function is to be
evaluated. It is modified in place unless it has a ``copy``
method.
args : tuple
Each element of `args` is a sequence of two elements, where
the first element specifies a transformation parameter axis
and the second element gives the successive parameter values
to evaluate along that axis.
Returns
-------
s : ndarray
Array of similarity values
p : ndarray
Corresponding array of evaluated transformation parameters
"""
nparams = T.param.size
if hasattr(T, 'copy'):
T = T.copy()
deltas = [[0] for i in range(nparams)]
for a in args:
deltas[a[0]] = a[1]
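        # Build the Cartesian product of the supplied offsets: one similarity
        # evaluation per combination; parameters not listed keep a single zero offset.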
grids = np.mgrid[[slice(0, len(d)) for d in deltas]]
ntrials = np.prod(grids.shape[1:])
Deltas = [np.asarray(deltas[i])[grids[i, :]].ravel()\
for i in range(nparams)]
simis = np.zeros(ntrials)
params = np.zeros([nparams, ntrials])
Tv = ChainTransform(T, pre=self._from_affine,
post=self._to_inv_affine)
param0 = Tv.param
for i in range(ntrials):
param = param0 + np.array([D[i] for D in Deltas])
Tv.param = param
simis[i] = self._eval(Tv)
params[:, i] = param
return simis, params
def ideal_spacing(data, npoints):
"""
Tune spacing factors so that the number of voxels in the
output block matches a given number.
Parameters
----------
data : ndarray or sequence
Data image to subsample
npoints : number
Target number of voxels (negative values will be ignored)
Returns
-------
spacing: ndarray
Spacing factors
"""
dims = data.shape
actual_npoints = (data >= 0).sum()
spacing = np.ones(3, dtype='uint')
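    # Greedily increase the subsampling factor along the densest axis until
    # the number of in-mask (non-negative) voxels no longer exceeds the target.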
while actual_npoints > npoints:
# Subsample the direction with the highest number of samples
ddims = dims / spacing
if ddims[0] >= ddims[1] and ddims[0] >= ddims[2]:
dir = 0
elif ddims[1] > ddims[0] and ddims[1] >= ddims[2]:
dir = 1
else:
dir = 2
spacing[dir] += 1
subdata = data[::spacing[0], ::spacing[1], ::spacing[2]]
actual_npoints = (subdata >= 0).sum()
return spacing
def smallest_bounding_box(msk):
"""
Extract the smallest bounding box from a mask
Parameters
----------
msk : ndarray
Array of boolean
Returns
-------
corner: ndarray
3-dimensional coordinates of bounding box corner
size: ndarray
3-dimensional size of bounding box
"""
x, y, z = np.where(msk > 0)
corner = np.array([x.min(), y.min(), z.min()])
size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])
return corner, size
def approx_gradient(f, x, epsilon):
"""
Approximate the gradient of a function using central finite
differences
Parameters
----------
f: callable
The function to differentiate
x: ndarray
Point where the function gradient is to be evaluated
epsilon: float
Stepsize for finite differences
Returns
-------
g: ndarray
Function gradient at `x`
"""
n = len(x)
g = np.zeros(n)
ei = np.zeros(n)
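    # Central difference along each axis:
    # g[i] ~ (f(x + .5*epsilon*e_i) - f(x - .5*epsilon*e_i)) / epsilon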
for i in range(n):
ei[i] = .5 * epsilon
g[i] = (f(x + ei) - f(x - ei)) / epsilon
ei[i] = 0
return g
def approx_hessian_diag(f, x, epsilon):
"""
Approximate the Hessian diagonal of a function using central
finite differences
Parameters
----------
f: callable
The function to differentiate
x: ndarray
Point where the Hessian is to be evaluated
epsilon: float
Stepsize for finite differences
Returns
-------
h: ndarray
Diagonal of the Hessian at `x`
"""
n = len(x)
h = np.zeros(n)
ei = np.zeros(n)
fx = f(x)
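    # Second-order central difference:
    # h[i] ~ (f(x + epsilon*e_i) + f(x - epsilon*e_i) - 2*f(x)) / epsilon**2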
for i in range(n):
ei[i] = epsilon
h[i] = (f(x + ei) + f(x - ei) - 2 * fx) / (epsilon ** 2)
ei[i] = 0
return h
def approx_hessian(f, x, epsilon):
"""
Approximate the full Hessian matrix of a function using central
finite differences
Parameters
----------
f: callable
The function to differentiate
x: ndarray
Point where the Hessian is to be evaluated
epsilon: float
Stepsize for finite differences
Returns
-------
H: ndarray
Hessian matrix at `x`
"""
n = len(x)
H = np.zeros((n, n))
ei = np.zeros(n)
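    # Row i of the Hessian is the central difference of the gradient taken
    # along axis i.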
for i in range(n):
ei[i] = .5 * epsilon
g1 = approx_gradient(f, x + ei, epsilon)
g2 = approx_gradient(f, x - ei, epsilon)
H[i, :] = (g1 - g2) / epsilon
ei[i] = 0
return H
def smooth(img, sigma):
"""
Smooth an image by an isotropic Gaussian filter
Parameters
----------
img: nibabel-like image
Input image
sigma: float
Filter standard deviation in mm
Returns
-------
sdata: ndarray
Smoothed data array
"""
if sigma < 0:
raise ValueError('smoothing kernel size is negative')
elif sigma == 0:
return img.get_data()
else:
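        # Convert the kernel width from mm to voxels using the column norms of
        # the affine (the voxel size along each axis).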
sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))
return nd.gaussian_filter(img.get_data(), sigma_vox)
def _clamp_array(x, y, bins):
# Threshold
dmaxmax = 2 ** (8 * y.dtype.itemsize - 1) - 1
dmax = bins - 1 # default output maximum value
if dmax > dmaxmax:
raise ValueError('Excess number of bins')
xmin = float(x.min())
xmax = float(x.max())
d = xmax - xmin
"""
    If the image dynamic range is small, no compression is needed: just
    downshift the image values and keep one bin per integer value.
    Otherwise, linearly rescale the downshifted values into the requested
    number of bins.
"""
if issubclass(x.dtype.type, np.integer) and d <= dmax:
y[:] = x - xmin
bins = int(d) + 1
else:
a = dmax / d
y[:] = np.round(a * (x - xmin))
return y, bins
def clamp_array(x, bins, mask=None):
"""
Clamp array values that fall within a given mask in the range
[0..bins-1] and reset masked values to -1.
Parameters
----------
x : ndarray
The input array
bins : number
Desired number of bins
mask : ndarray, tuple or slice
Anything such that x[mask] is an array.
Returns
-------
y : ndarray
Clamped array, masked items are assigned -1
bins : int
Adjusted number of bins
"""
if bins > np.iinfo(np.short).max:
raise ValueError('Too large a bin size')
y = -np.ones(x.shape, dtype=CLAMP_DTYPE)
if mask is None:
y, bins = _clamp_array(x, y, bins)
else:
ym = y[mask]
xm = x[mask]
ym, bins = _clamp_array(xm, ym, bins)
y[mask] = ym
return y, bins
def clamp(img, bins, mask=None, sigma=0):
"""Remap in-mask image intensity values to the range
[0..bins-1]. Out-of-mask voxels are mapped to -1. A spatial
Gaussian filter is possibly applied as a pre-processing.
Parameters
----------
img: nibabel-like image
Input image
bins : number
Desired number of bins
mask : ndarray, tuple or slice
Image mask
sigma: float
Gaussian filter standard deviation in mm
Returns
-------
data: ndarray
Clamped data array
bins: int
Adjusted number of bins
"""
data = smooth(img, sigma)
return clamp_array(data, bins, mask=mask)
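# Example usage (sketch, not part of the original module): assuming `from_img`
# and `to_img` are nibabel-like images and that the HistogramRegistration
# constructor (not shown in this excerpt) takes them as its first two
# arguments, a typical registration run would look like:
#
#   R = HistogramRegistration(from_img, to_img)
#   T = R.optimize('rigid')               # locally optimal rigid transform
#   g = R.eval_gradient(T)                # finite-difference gradient at T
#   s, p = R.explore(T, (0, [-1, 0, 1]))  # probe similarity along parameter 0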
| alexis-roche/nireg | nireg/histogram_registration.py | Python | bsd-3-clause | 23,075 | 0.000563 |
"""Test sensor of Brother integration."""
from datetime import datetime, timedelta
import json
from homeassistant.components.brother.const import UNIT_PAGES
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import UTC, utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.brother import init_integration
ATTR_REMAINING_PAGES = "remaining_pages"
ATTR_COUNTER = "counter"
async def test_sensors(hass):
"""Test states of the sensors."""
test_time = datetime(2019, 11, 11, 9, 10, 32, tzinfo=UTC)
with patch(
"homeassistant.components.brother.sensor.utcnow", return_value=test_time
):
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer"
assert state.state == "waiting"
entry = registry.async_get("sensor.hl_l2340dw_status")
assert entry
assert entry.unique_id == "0123456789_status"
state = hass.states.get("sensor.hl_l2340dw_black_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "75"
entry = registry.async_get("sensor.hl_l2340dw_black_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_black_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_cyan_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "10"
entry = registry.async_get("sensor.hl_l2340dw_cyan_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_cyan_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_magenta_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "8"
entry = registry.async_get("sensor.hl_l2340dw_magenta_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_magenta_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_yellow_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "2"
entry = registry.async_get("sensor.hl_l2340dw_yellow_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_yellow_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 11014
assert state.attributes.get(ATTR_COUNTER) == 986
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_black_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_black_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_black_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_cyan_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_magenta_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_yellow_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_fuser_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:water-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
entry = registry.async_get("sensor.hl_l2340dw_fuser_remaining_life")
assert entry
assert entry.unique_id == "0123456789_fuser_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:current-ac"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
entry = registry.async_get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert entry
assert entry.unique_id == "0123456789_belt_unit_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "98"
entry = registry.async_get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert entry
assert entry.unique_id == "0123456789_pf_kit_1_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_page_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "986"
entry = registry.async_get("sensor.hl_l2340dw_page_counter")
assert entry
assert entry.unique_id == "0123456789_page_counter"
state = hass.states.get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "538"
entry = registry.async_get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert entry
assert entry.unique_id == "0123456789_duplex_unit_pages_counter"
state = hass.states.get("sensor.hl_l2340dw_b_w_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "709"
entry = registry.async_get("sensor.hl_l2340dw_b_w_counter")
assert entry
assert entry.unique_id == "0123456789_b/w_counter"
state = hass.states.get("sensor.hl_l2340dw_color_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "902"
entry = registry.async_get("sensor.hl_l2340dw_color_counter")
assert entry
assert entry.unique_id == "0123456789_color_counter"
state = hass.states.get("sensor.hl_l2340dw_uptime")
assert state
assert state.attributes.get(ATTR_ICON) is None
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TIMESTAMP
assert state.state == "2019-09-24T12:14:56+00:00"
entry = registry.async_get("sensor.hl_l2340dw_uptime")
assert entry
assert entry.unique_id == "0123456789_uptime"
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when device is offline."""
await init_integration(hass)
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
future = utcnow() + timedelta(minutes=5)
with patch("brother.Brother._get_data", side_effect=ConnectionError()):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=10)
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeasasistant/update_entity."""
await init_integration(hass)
await async_setup_component(hass, "homeassistant", {})
with patch("homeassistant.components.brother.Brother.async_update") as mock_update:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.hl_l2340dw_status"]},
blocking=True,
)
assert len(mock_update.mock_calls) == 1
| tchellomello/home-assistant | tests/components/brother/test_sensor.py | Python | apache-2.0 | 10,659 | 0.000281 |
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.tools import float_compare
from lxml import etree
import logging
_logger = logging.getLogger(__name__)
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
delivery_status = fields.Selection([
('no', 'Not purchased'),
('to receive', 'To Receive'),
('received', 'Received'),
],
string='Delivery Status',
compute='_compute_delivery_status',
store=True,
readonly=True,
copy=False,
default='no'
)
vouchers = fields.Char(
compute='_compute_vouchers'
)
qty_on_voucher = fields.Float(
compute="_compute_qty_on_voucher",
string="On Voucher",
digits='Product Unit of Measure',
)
qty_returned = fields.Float(
string='Returned',
copy=False,
default=0.0,
readonly=True,
compute='_compute_qty_returned'
)
@api.depends_context('voucher')
def _compute_qty_on_voucher(self):
        # when computing per voucher we do not take the invoicing method into
        # account, i.e. we compute as if the method were "on received quantities"
voucher = self._context.get('voucher', False)
if not voucher:
self.update({'qty_on_voucher': 0.0})
return
lines = self.filtered(
lambda x: x.order_id.state in ['purchase', 'done'])
moves = self.env['stock.move'].search([
('id', 'in', lines.mapped('move_ids').ids),
('state', '=', 'done'),
('picking_id.vouchers', 'ilike', voucher[0]),
])
for line in lines:
line.qty_on_voucher = sum(moves.filtered(
lambda x: x.id in line.move_ids.ids).mapped('product_uom_qty'))
def button_cancel_remaining(self):
        # cancelling kits is not properly supported because odoo only
        # computes the delivered quantity once the whole kit has been
        # delivered, so for now we disable cancellation for kits.
bom_enable = 'bom_ids' in self.env['product.template']._fields
for rec in self:
old_product_qty = rec.product_qty
            # TODO maybe change in v10
            # in this case we do block it: if users still want to create a
            # credit note they can do it by searching the invoice lines
            # and then cancelling afterwards
if rec.qty_invoiced > rec.qty_received:
raise UserError(_(
                    'You can not cancel the remaining qty to receive because '
                    'more products were invoiced than received. '
                    'You should correct the invoice or ask for a refund'))
if bom_enable:
bom = self.env['mrp.bom']._bom_find(
product=rec.product_id)
if bom and bom.type == 'phantom':
raise UserError(_(
"Cancel remaining can't be called for Kit Products "
"(products with a bom of type kit)."))
rec.product_qty = rec.qty_received
to_cancel_moves = rec.move_ids.filtered(
lambda x: x.state not in ['done', 'cancel'])
to_cancel_moves._cancel_quantity()
rec.order_id.message_post(
body=_(
'Cancel remaining call for line "%s" (id %s), line '
'qty updated from %s to %s') % (
rec.name, rec.id, old_product_qty, rec.product_qty))
def _compute_vouchers(self):
for rec in self:
rec.vouchers = ', '.join(rec.mapped(
'move_ids.picking_id.voucher_ids.display_name'))
@api.depends(
'order_id.state', 'qty_received', 'qty_returned', 'product_qty',
'order_id.force_delivered_status')
def _compute_delivery_status(self):
precision = self.env['decimal.precision'].precision_get(
'Product Unit of Measure')
for line in self:
if line.state not in ('purchase', 'done'):
line.delivery_status = 'no'
continue
if line.order_id.force_delivered_status:
line.delivery_status = line.order_id.force_delivered_status
continue
if float_compare(
(line.qty_received + line.qty_returned), line.product_qty,
precision_digits=precision) == -1:
line.delivery_status = 'to receive'
elif float_compare(
(line.qty_received + line.qty_returned), line.product_qty,
precision_digits=precision) >= 0:
line.delivery_status = 'received'
else:
line.delivery_status = 'no'
@api.onchange('product_qty')
def _onchange_product_qty(self):
if (
self.state == 'purchase' and
self.product_id.type in ['product', 'consu'] and
self.product_qty < self._origin.product_qty):
warning_mess = {
'title': _('Ordered quantity decreased!'),
'message': (
                    'You are reducing the ordered quantity! We recommend using'
                    ' the button to cancel the remaining quantity and'
                    ' then setting the desired quantity.'),
}
self.product_qty = self._origin.product_qty
return {'warning': warning_mess}
return {}
@api.depends('order_id.state', 'move_ids.state')
def _compute_qty_returned(self):
for line in self:
qty = 0.0
for move in line.move_ids.filtered(
lambda m: m.state == 'done' and
m.location_id.usage != 'supplier' and m.to_refund):
qty += move.product_uom._compute_quantity(
move.product_uom_qty,
line.product_uom)
line.qty_returned = qty
    # Override the original method to introduce qty_on_voucher
def action_add_all_to_invoice(self):
for rec in self:
rec.invoice_qty = rec.qty_on_voucher or (
rec.qty_to_invoice + rec.invoice_qty)
@api.model
def fields_view_get(self, view_id=None, view_type='form',
toolbar=False, submenu=False):
"""
        If we come from an invoice, 'force_line_edit' is sent in the context
        and we make the tree view editable, adding the qty_on_voucher field
"""
res = super().fields_view_get(
view_id=view_id, view_type=view_type,
toolbar=toolbar, submenu=submenu)
if self._context.get('force_line_edit') and view_type == 'tree':
doc = etree.XML(res['arch'])
placeholder = doc.xpath("//field[1]")[0]
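            # Inject the qty_on_voucher column right before the first field of
            # the tree view, as a read-only cell.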
placeholder.addprevious(
etree.Element('field', {
'name': 'qty_on_voucher',
'readonly': '1',
                    # on enterprise the view is not refreshed
# 'invisible': "not context.get('voucher', False)",
}))
res['fields'].update(self.fields_get(
['qty_on_voucher']))
res['arch'] = etree.tostring(doc)
return res
| ingadhoc/purchase | purchase_stock_ux/models/purchase_order_line.py | Python | agpl-3.0 | 7,593 | 0 |
#!/usr/bin/env python
__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014, The Cogent Project"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "4.1"
__maintainer__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"
from helpers import running, constants
# change here for type of net:
NETWORK_FILES = constants.NETWORK_FILES_UN_ROAD
TYPE_NET_DIR = "road/"
def main():
running.sampling(NETWORK_FILES, TYPE_NET_DIR, [])
print("All graphs for " + TYPE_NET_DIR + " were processed. The end! \n")
if __name__ == '__main__':
main()
| bt3gl/NetAna-Complex-Network-Analysis | src/calculate_features_advanced/road.py | Python | mit | 577 | 0.015598 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all first party audience segments.
To create first party audience segments, run create_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize client object.
client = dfp.DfpClient.LoadFromStorage()
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201505')
# Create statement object to only select first party audience segments.
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'FIRST_PARTY'
}
}]
query = 'WHERE Type = :type'
statement = dfp.FilterStatement(query, values)
# Get audience segments by statement.
while True:
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response:
segments = response['results']
for segment in segments:
print ('Audience segment with id \'%s\' and name \'%s\' of size '
'%s was found. ' %
(segment['id'], segment['name'], segment['size']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| wubr2000/googleads-python-lib | examples/dfp/v201505/audience_segment_service/get_first_party_audience_segments.py | Python | apache-2.0 | 2,062 | 0.009214 |
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from django_openid_auth.store import DjangoOpenIDStore
class Command(NoArgsCommand):
help = 'Clean up stale OpenID associations and nonces'
def handle_noargs(self, **options):
store = DjangoOpenIDStore()
store.cleanup()
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django_openid_auth/management/commands/openid_cleanup.py | Python | agpl-3.0 | 1,732 | 0 |
#!/usr/bin/python
import re
import sys
import os
import getopt
import vcf
def main():
params = parseArgs()
vfh = vcf.Reader(open(params.vcf, 'r'))
#grab contig sizes
contigs = dict()
for c,s in vfh.contigs.items():
contigs[s.id] = s.length
regions = list()
this_chrom = None
start = int()
stop = int()
count = 0
for rec in vfh:
if not this_chrom:
this_chrom = rec.CHROM
start = 1
stop = 1
count = 0
#If we entered new chromosome, submit old break
elif this_chrom != rec.CHROM:
t = tuple([this_chrom, start, contigs[this_chrom]])
regions.append(t)
this_chrom = rec.CHROM
start = 1
stop = 1
count = 0
#if this SNP is parsimony-informative
if rec.is_snp and not rec.is_monomorphic:
#Check if parsimony-informative
if is_PIS(rec):
count+=1
#if this is the final PIS, submit region to list
if count == params.force:
stop = rec.POS
t = tuple([this_chrom, start, stop])
regions.append(t)
start = stop + 1
count = 0
t = tuple([this_chrom, start, contigs[this_chrom]])
regions.append(t)
print("Writing regions to out.regions...")
write_regions("out.regions", regions)
#Function to write list of regions tuples, in GATK format
def write_regions(f, r):
with open(f, 'w') as fh:
try:
for reg in r:
ol = str(reg[0]) + ":" + str(reg[1]) + "-" + str(reg[2]) + "\n"
fh.write(ol)
except IOError as e:
print("Could not read file %s: %s"%(f,e))
sys.exit(1)
except Exception as e:
print("Unexpected error reading file %s: %s"%(f,e))
sys.exit(1)
finally:
fh.close()
#Function to check pyVCF record for if parsimony informative or not
def is_PIS(r):
ref=0
alt=0
for call in r.samples:
		if call.gt_type is not None:
if call.gt_type == 0:
ref += 1
elif call.gt_type == 1:
alt += 1
elif call.gt_type == 2:
alt += 1
ref += 1
if ref >= 2 and alt >= 2:
return(True)
if ref <= 2 and alt <= 2:
return(False)
#Object to parse command-line arguments
class parseArgs():
def __init__(self):
#Define options
try:
options, remainder = getopt.getopt(sys.argv[1:], 'v:f:h', \
["vcf=" "help", "force="])
except getopt.GetoptError as err:
print(err)
self.display_help("\nExiting because getopt returned non-zero exit status.")
#Default values for params
#Input params
self.vcf=None
self.force=100000
#First pass to see if help menu was called
for o, a in options:
if o in ("-h", "-help", "--help"):
self.display_help("Exiting because help menu was called.")
#Second pass to set all args.
for opt, arg_raw in options:
arg = arg_raw.replace(" ","")
arg = arg.strip()
opt = opt.replace("-","")
#print(opt,arg)
if opt in ('v', 'vcf'):
self.vcf = arg
elif opt in ('f','force'):
self.force=int(arg)
elif opt in ('h', 'help'):
pass
else:
assert False, "Unhandled option %r"%opt
#Check manditory options are set
if not self.vcf:
self.display_help("Must provide VCF file <-v,--vcf>")
def display_help(self, message=None):
if message is not None:
print()
print (message)
print ("\nfindBreaksVCF.py\n")
print ("Contact:Tyler K. Chafin, University of Arkansas,tkchafin@uark.edu")
print ("\nUsage: ", sys.argv[0], "-v <input.vcf> -f <100000>\n")
print ("Description: Breaks chromosomes into chunks of X parsimony-informative sites, for running MDL")
print("""
Arguments:
-v,--vcf : VCF file for parsing
-f,--force : Number of PIS to force a break
-h,--help : Displays help menu
""")
print()
sys.exit()
#Call main function
if __name__ == '__main__':
main()
| tkchafin/scripts | findBreaksVCF.py | Python | gpl-3.0 | 3,606 | 0.044925 |
"""Compare gas/elec demand on Local Authority Districts with modelled demand
"""
import os
import operator
import logging
import copy
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from energy_demand.basic import lookup_tables
from energy_demand import enduse_func
from energy_demand.profiles import load_profile
from energy_demand.validation import elec_national_data
from energy_demand.read_write import data_loader
from energy_demand.basic import date_prop
from energy_demand.plotting import fig_load_profile_dh_multiple
from energy_demand.plotting import basic_plot_functions
from energy_demand.plotting import plotting_styles
def map_LAD_2011_2015(lad_data):
"""Map LAD 2015 values to LAD 2011.
Arguments
-----------
lad_data : dict
LAD 2015 data
Returns
--------
mapped_lads : dict
LAD 2011 census data lads
"""
mapped_lads = copy.deepcopy(lad_data)
mapped_lads.keys()
try:
# E41000324 (City of London, Westminster) splits
# to E09000001 (City of London) and E09000033 (Westminster)
mapped_lads['E41000324'] = lad_data['E09000001'] + lad_data['E09000033']
del mapped_lads['E09000001']
del mapped_lads['E09000033']
except:
pass
try:
# E41000052 (Cornwall, Isles of Scilly) splits
# to E06000052 (Cornwall) and E06000053 (Isles of Scilly) (edited)
mapped_lads['E41000052'] = lad_data['E06000052'] + lad_data['E06000053']
del mapped_lads['E06000052']
del mapped_lads['E06000053']
except:
pass
try:
# missing S12000013 (Na h-Eileanan Siar)
# and S12000027 (Shetland Islands)
del mapped_lads['S12000013']
del mapped_lads['S12000027']
except:
pass
return mapped_lads
def temporal_validation(
result_paths,
ed_fueltype_yh,
elec_factored_yh,
plot_criteria
):
"""National hourly electricity data is validated with
the summed modelled hourly demand for all regions.
Because the total annual modelled and real demands
do not match (because of different data sources
and because Northern Ireland is not included in the
validation data) a correction factor is used.
Arguments
---------
result_paths : dict
Paths
ed_fueltype_yh : array
Fuel type specific yh energy demand
plot_criteria : bool
Criteria to show plots or not
"""
# ----------------
# Plot a full year
# ----------------
days_to_plot = list(range(0, 365))
elec_national_data.compare_results(
'validation_temporal_electricity_8760h.pdf',
result_paths['data_results_validation'],
elec_factored_yh,
ed_fueltype_yh,
'all_submodels',
days_to_plot,
plot_crit=plot_criteria)
# Plot four weeks (one of each season)
winter_week = list(range(
date_prop.date_to_yearday(2015, 1, 12), date_prop.date_to_yearday(2015, 1, 19))) #Jan
spring_week = list(range(
date_prop.date_to_yearday(2015, 5, 11), date_prop.date_to_yearday(2015, 5, 18))) #May
summer_week = list(range(
date_prop.date_to_yearday(2015, 7, 13), date_prop.date_to_yearday(2015, 7, 20))) #Jul
autumn_week = list(range(
date_prop.date_to_yearday(2015, 10, 12), date_prop.date_to_yearday(2015, 10, 19))) #Oct
days_to_plot = winter_week + spring_week + summer_week + autumn_week
elec_national_data.compare_results(
'validation_temporal_electricity_weeks_selection.pdf',
result_paths['data_results_validation'],
elec_factored_yh,
ed_fueltype_yh,
'all_submodels',
days_to_plot,
plot_crit=plot_criteria)
return
def spatial_validation_lad_level(
disaggregated_fuel,
data_results_validation,
paths,
regions,
reg_coord,
plot_crit
):
"""Spatial validation
"""
fuel_elec_regs_yh = {}
fuel_gas_regs_yh = {}
fuel_gas_residential_regs_yh = {}
fuel_gas_non_residential_regs_yh = {}
fuel_elec_residential_regs_yh = {}
fuel_elec_non_residential_regs_yh = {}
lookups = lookup_tables.basic_lookups()
# -------------------------------------------
# Spatial validation
# -------------------------------------------
subnational_elec = data_loader.read_lad_demands(paths['val_subnational_elec'])
subnational_elec_residential = data_loader.read_lad_demands(paths['val_subnational_elec_residential'])
subnational_elec_non_residential = data_loader.read_lad_demands(paths['val_subnational_elec_non_residential'])
subnational_gas = data_loader.read_lad_demands(paths['val_subnational_gas'])
subnational_gas_residential = data_loader.read_lad_demands(paths['val_subnational_gas_residential'])
subnational_gas_non_residential = data_loader.read_lad_demands(paths['val_subnational_gas_non_residential'])
logging.info("compare total II {} {}".format(sum(subnational_gas.values()), sum(subnational_gas_residential.values())))
# Create fueltype secific dict
for region in regions:
fuel_elec_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs'][region][lookups['fueltypes']['electricity']]
fuel_elec_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_residenital'][region][lookups['fueltypes']['electricity']]
fuel_elec_non_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_non_residential'][region][lookups['fueltypes']['electricity']]
fuel_gas_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs'][region][lookups['fueltypes']['gas']]
fuel_gas_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_residenital'][region][lookups['fueltypes']['gas']]
fuel_gas_non_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_non_residential'][region][lookups['fueltypes']['gas']]
# ----------------------------------------
# Remap demands between 2011 and 2015 LADs
# ----------------------------------------
subnational_elec = map_LAD_2011_2015(subnational_elec)
subnational_elec_residential = map_LAD_2011_2015(subnational_elec_residential)
subnational_elec_non_residential = map_LAD_2011_2015(subnational_elec_non_residential)
subnational_gas = map_LAD_2011_2015(subnational_gas)
subnational_gas_residential = map_LAD_2011_2015(subnational_gas_residential)
subnational_gas_non_residential = map_LAD_2011_2015(subnational_gas_non_residential)
fuel_elec_regs_yh = map_LAD_2011_2015(fuel_elec_regs_yh)
fuel_elec_residential_regs_yh = map_LAD_2011_2015(fuel_elec_residential_regs_yh)
fuel_elec_non_residential_regs_yh = map_LAD_2011_2015(fuel_elec_non_residential_regs_yh)
fuel_gas_regs_yh = map_LAD_2011_2015(fuel_gas_regs_yh)
fuel_gas_residential_regs_yh = map_LAD_2011_2015(fuel_gas_residential_regs_yh)
fuel_gas_non_residential_regs_yh = map_LAD_2011_2015(fuel_gas_non_residential_regs_yh)
logging.info("compare total {} {}".format(
sum(fuel_gas_residential_regs_yh.values()), sum(fuel_gas_regs_yh.values())))
# --------------------------------------------
# Correct REAL Values that sum is the same
# ----------------------------------------------
data_inputlist = [
(fuel_elec_residential_regs_yh, subnational_elec_residential), # domestic
(fuel_elec_non_residential_regs_yh, subnational_elec_non_residential)] # nondomestics
spatial_validation_multiple(
reg_coord=reg_coord,
input_data=data_inputlist,
regions=regions,
fueltype_str='elec',
fig_name=os.path.join(data_results_validation, 'validation_multiple_elec.pdf'),
label_points=False,
plotshow=plot_crit)
data_inputlist = [
(fuel_gas_residential_regs_yh, subnational_gas_residential), # domestic
(fuel_gas_non_residential_regs_yh, subnational_gas_non_residential)] # nondomestics
spatial_validation_multiple(
reg_coord=reg_coord,
input_data=data_inputlist,
regions=regions,
fueltype_str='gas',
fig_name=os.path.join(data_results_validation, 'validation_multiple_gas.pdf'),
label_points=False,
plotshow=plot_crit)
logging.info("... Validation of electricity")
spatial_validation(
fuel_elec_regs_yh,
subnational_elec,
regions,
'elec',
os.path.join(data_results_validation, 'validation_spatial_elec.pdf'),
label_points=True,
plotshow=plot_crit)
logging.info("... Validation of residential electricity")
spatial_validation(
fuel_elec_residential_regs_yh,
subnational_elec_residential,
regions,
'elec',
os.path.join(data_results_validation, 'validation_spatial_residential_elec.pdf'),
label_points=True,
plotshow=plot_crit)
logging.info("... Validation of non-residential electricity")
spatial_validation(
fuel_elec_non_residential_regs_yh,
subnational_elec_non_residential,
regions,
'elec',
os.path.join(data_results_validation, 'validation_spatial_non_residential_elec.pdf'),
label_points=True,
plotshow=plot_crit)
logging.info("... Validation of gas")
spatial_validation(
fuel_gas_regs_yh,
subnational_gas,
regions,
'gas',
os.path.join(data_results_validation, 'validation_spatial_gas.pdf'),
label_points=True,
plotshow=plot_crit)
logging.info("... Validation of residential gas")
spatial_validation(
fuel_gas_residential_regs_yh,
subnational_gas_residential,
regions,
'gas',
os.path.join(data_results_validation, 'validation_spatial_residential_gas.pdf'),
label_points=True,
plotshow=plot_crit)
logging.info("... Validation of non residential gas")
spatial_validation(
fuel_gas_non_residential_regs_yh,
subnational_gas_non_residential,
regions,
'gas',
os.path.join(data_results_validation, 'validation_spatial_non_residential_gas.pdf'),
label_points=True,
plotshow=plot_crit)
return
def temporal_validation_msoa_lad(
ed_fueltype_national_yh,
ed_fueltype_regs_yh,
fueltypes,
result_paths,
paths,
regions,
reg_coord,
seasons,
model_yeardays_daytype,
plot_crit
):
"""Validate national hourly demand for yearls fuel
for all LADs. Test how the national disaggregation
works.
Info
-----
    Because the floor area is only available for LADs from 2001,
the LADs are converted to 2015 LADs.
"""
logging.info("... temporal validation")
# -------------------------------------------
# Electrictiy demands
# -------------------------------------------
# LAD level
subnational_elec_lad = data_loader.read_lad_demands(
paths['val_subnational_elec'])
# MSOA level
subnational_elec_msoa = data_loader.read_elec_data_msoa(
paths['val_subnational_msoa_elec'])
# Create fueltype secific dict
fuel_elec_regs_yh = {}
for region_array_nr, region in enumerate(regions):
gwh_modelled = np.sum(ed_fueltype_regs_yh[fueltypes['electricity']][region_array_nr])
fuel_elec_regs_yh[region] = gwh_modelled
# Create fueltype secific dict
fuel_gas_regs_yh = {}
for region_array_nr, region in enumerate(regions):
gwh_modelled = np.sum(ed_fueltype_regs_yh[fueltypes['gas']][region_array_nr])
fuel_gas_regs_yh[region] = gwh_modelled
# ----------------------------------------
# Remap demands between 2011 and 2015 LADs
# ----------------------------------------
subnational_elec = map_LAD_2011_2015(subnational_elec_lad)
fuel_elec_regs_yh = map_LAD_2011_2015(fuel_elec_regs_yh)
spatial_validation(
fuel_elec_regs_yh,
subnational_elec,
regions,
'elec',
os.path.join(result_paths['data_results_validation'], 'validation_spatial_elec_msoa_lad.pdf'),
label_points=False,
plotshow=plot_crit)
return
def spatio_temporal_val(
ed_fueltype_national_yh,
ed_fueltype_regs_yh,
result_paths,
paths,
regions,
seasons,
model_yeardays_daytype,
plot_crit
):
"""Validate spatial and temporal energy demands
Info
-----
    Because the floor area is only available for LADs from 2001,
the LADs are converted to 2015 LADs.
"""
logging.info("... temporal validation")
fueltypes = lookup_tables.basic_lookups()['fueltypes']
# -------------------------------------------
# Spatial validation after calculations
# -------------------------------------------
subnational_elec = data_loader.read_lad_demands(
paths['val_subnational_elec'])
subnational_gas = data_loader.read_lad_demands(
paths['val_subnational_gas'])
# Create fueltype secific dict
fuel_elec_regs_yh = {}
for region_array_nr, region in enumerate(regions):
fuel_elec_regs_yh[region] = np.sum(ed_fueltype_regs_yh[fueltypes['electricity']][region_array_nr])
# Create fueltype secific dict
fuel_gas_regs_yh = {}
for region_array_nr, region in enumerate(regions):
fuel_gas_regs_yh[region] = np.sum(ed_fueltype_regs_yh[fueltypes['gas']][region_array_nr])
# ----------------------------------------
# Remap demands between 2011 and 2015 LADs
# ----------------------------------------
subnational_elec = map_LAD_2011_2015(subnational_elec)
subnational_gas = map_LAD_2011_2015(subnational_gas)
fuel_elec_regs_yh = map_LAD_2011_2015(fuel_elec_regs_yh)
fuel_gas_regs_yh = map_LAD_2011_2015(fuel_gas_regs_yh)
spatial_validation(
fuel_elec_regs_yh,
subnational_elec,
regions,
'elec',
fig_name=os.path.join(result_paths['data_results_validation'], 'validation_spatial_elec_post_calcualtion.pdf'),
label_points=False,
plotshow=plot_crit)
spatial_validation(
fuel_gas_regs_yh,
subnational_gas,
regions,
'gas',
fig_name=os.path.join(result_paths['data_results_validation'], 'validation_spatial_gas_post_calcualtion.pdf'),
label_points=False,
plotshow=plot_crit)
# -------------------------------------------
# Temporal validation (hourly for national)
# -------------------------------------------
# Read validation data
elec_2015_indo, elec_2015_itsdo = elec_national_data.read_raw_elec_2015(
paths['val_nat_elec_data'])
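    # Scale the measured national profile so its annual total matches the
    # modelled total (different data sources; Northern Ireland is missing
    # from the validation data).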
f_diff_elec = np.sum(ed_fueltype_national_yh[fueltypes['electricity']]) / np.sum(elec_2015_indo)
logging.info("... ed diff modellend and real [p] %s: ", (1 - f_diff_elec) * 100)
elec_factored_yh = f_diff_elec * elec_2015_indo
temporal_validation(
result_paths,
ed_fueltype_national_yh[fueltypes['electricity']],
elec_factored_yh,
plot_crit)
# ---------------------------------------------------
# Calculate average season and daytypes and plot
# ---------------------------------------------------
logging.info("...calculate average data and plot per season and fueltype")
calc_av_lp_modelled, calc_lp_modelled = load_profile.calc_av_lp(
ed_fueltype_national_yh[fueltypes['electricity']],
seasons,
model_yeardays_daytype)
calc_av_lp_real, calc_lp_real = load_profile.calc_av_lp(
elec_factored_yh,
seasons,
model_yeardays_daytype)
# Plot average daily loads
fig_load_profile_dh_multiple.run(
path_fig_folder=result_paths['data_results_validation'],
path_plot_fig=os.path.join(
result_paths['data_results_validation'],
'validation_all_season_daytypes.pdf'),
calc_av_lp_modelled=calc_av_lp_modelled,
calc_av_lp_real=calc_av_lp_real,
calc_lp_modelled=calc_lp_modelled,
calc_lp_real=calc_lp_real,
plot_peak=True,
plot_radar=False,
plot_all_entries=False,
plot_max_min_polygon=True,
plotshow=False,
max_y_to_plot=60,
fueltype_str=False,
year=False)
# ---------------------------------------------------
# Validation of national electrictiy demand for peak
# ---------------------------------------------------
logging.debug(
"...validation of peak data: compare peak with data")
# Because the coldest day is not the same for every region,
# the coldest day needs to be defined manually or defined
# by getting the hours with maximum electricity demand
# Peak across all fueltypes WARNING: Fueltype specific
peak_day_all_fueltypes = enduse_func.get_peak_day_all_fueltypes(ed_fueltype_national_yh)
logging.info("Peak day 'peak_day_all_fueltypes': " + str(peak_day_all_fueltypes))
fueltype = fueltypes['electricity']
peak_day_electricity, _ = enduse_func.get_peak_day_single_fueltype(ed_fueltype_national_yh[fueltype])
logging.info("Peak day 'peak_day_electricity': " + str(peak_day_electricity))
elec_national_data.compare_peak(
"validation_peak_elec_day_all_fueltypes.pdf",
result_paths['data_results_validation'],
elec_2015_indo[peak_day_all_fueltypes],
ed_fueltype_national_yh[fueltypes['electricity']][peak_day_all_fueltypes],
peak_day_all_fueltypes)
elec_national_data.compare_peak(
"validation_peak_elec_day_only_electricity.pdf",
result_paths['data_results_validation'],
elec_2015_indo[peak_day_electricity],
ed_fueltype_national_yh[fueltypes['electricity']][peak_day_electricity],
peak_day_electricity)
# Manual peak day
peak_day = 19
elec_national_data.compare_peak(
"validation_elec_peak_day_{}.pdf".format(peak_day),
result_paths['data_results_validation'],
elec_factored_yh[peak_day],
ed_fueltype_national_yh[fueltypes['electricity']][peak_day],
peak_day)
peak_day = 33
elec_national_data.compare_peak(
"validation_elec_peak_day_{}.pdf".format(peak_day),
result_paths['data_results_validation'],
elec_factored_yh[peak_day],
ed_fueltype_national_yh[fueltypes['electricity']][peak_day],
peak_day)
peak_day_real_electricity, _ = enduse_func.get_peak_day_single_fueltype(elec_2015_indo)
logging.info("Peak day 'peak_day_electricity': " + str(peak_day_real_electricity))
#raise Exception
elec_national_data.compare_peak(
"validation_elec_peak_day_{}.pdf".format(peak_day_real_electricity),
result_paths['data_results_validation'],
elec_factored_yh[peak_day],
ed_fueltype_national_yh[fueltypes['electricity']][peak_day_real_electricity],
peak_day_real_electricity)
# ---------------------------------------------------
# Validate boxplots for every hour (temporal validation)
# ---------------------------------------------------
elec_national_data.compare_results_hour_boxplots(
"validation_hourly_boxplots_electricity_01.pdf",
result_paths['data_results_validation'],
elec_2015_indo,
ed_fueltype_national_yh[fueltypes['electricity']])
return
def spatial_validation(
subnational_modelled,
subnational_real,
regions,
fueltype_str,
fig_name,
label_points=False,
plotshow=False
):
"""Compare gas/elec demand for LADs
Arguments
----------
lad_infos_shapefile : dict
Infos of shapefile (dbf / csv)
ed_fueltype_regs_yh : object
Regional fuel Given as GWh (?)
subnational_real : dict
        for electricity: Sub-national electricity demand given as GWh
Note
-----
SOURCE OF LADS:
    - Data for Northern Ireland is not included here, although it is in the BEIS dataset.
"""
logging.debug("... Validation of spatial disaggregation")
result_dict = {}
result_dict['real_demand'] = {}
result_dict['modelled_demand'] = {}
diff_real_modelled_p = []
diff_real_modelled_abs = []
# -------------------------------------------
# Match ECUK sub-regional demand with geocode and calculate statistics
# -------------------------------------------
for region in regions:
try:
if subnational_real[region] == 0:
pass
else:
try:
real = subnational_real[region]
modelled = subnational_modelled[region]
# --Sub Regional Electricity demand (as GWh)
result_dict['real_demand'][region] = real
result_dict['modelled_demand'][region] = modelled
diff_real_modelled_p.append(abs(100 - ((100 / real) * modelled)))
diff_real_modelled_abs.append(real - modelled)
except KeyError:
                    pass  # real and modelled data not both available
except KeyError:
logging.debug(
"Sub-national spatial validation: No fuel for region %s", region)
    # Calculate the average deviation between real and modelled
av_deviation_real_modelled = np.average(diff_real_modelled_p)
median_absolute_deviation = np.median(diff_real_modelled_p) # median deviation
# Calculate standard deviation
std_dev_p = np.std(diff_real_modelled_p) # Given as percent
std_dev_abs = np.std(diff_real_modelled_abs) # Given as energy unit
# -----------------
# Sort results according to size
# -----------------
sorted_dict_real = sorted(
result_dict['real_demand'].items(),
key=operator.itemgetter(1))
# -------------------------------------
# Plot
# -------------------------------------
fig = plt.figure(figsize=basic_plot_functions.cm2inch(9, 8))
ax = fig.add_subplot(1, 1, 1)
x_values = np.arange(0, len(sorted_dict_real), 1)
y_real_demand = []
y_modelled_demand = []
labels = []
for sorted_region in sorted_dict_real:
geocode_lad = sorted_region[0]
y_real_demand.append(result_dict['real_demand'][geocode_lad])
y_modelled_demand.append(result_dict['modelled_demand'][geocode_lad])
logging.debug(
"validation %s LAD %s: real: %s modelled: %s modelled percentage: %s (%sp diff)",
fueltype_str,
geocode_lad,
round(result_dict['real_demand'][geocode_lad], 4),
round(result_dict['modelled_demand'][geocode_lad], 4),
round(100 / result_dict['real_demand'][geocode_lad] * result_dict['modelled_demand'][geocode_lad], 4),
round(100 - (100 / result_dict['real_demand'][geocode_lad] * result_dict['modelled_demand'][geocode_lad]), 4))
labels.append(geocode_lad)
# Calculate r_squared
_slope, _intercept, r_value, _p_value, _std_err = stats.linregress(
y_real_demand,
y_modelled_demand)
# --------
# Axis
# --------
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off
# ----------------------------------------------
# Plot
# ----------------------------------------------
plt.plot(
x_values,
y_real_demand,
linestyle='None',
marker='o',
alpha=0.6,
markersize=1.6,
fillstyle='full',
markerfacecolor='grey',
markeredgewidth=0.2,
color='black',
label='actual')
plt.plot(
x_values,
y_modelled_demand,
marker='o',
linestyle='None',
markersize=1.6,
alpha=0.6,
markerfacecolor='white',
fillstyle='none',
markeredgewidth=0.5,
markeredgecolor='blue',
color='black',
label='model')
plt.ylim(ymin=0)
# -----------
# Labelling
# -----------
if label_points:
for pos, txt in enumerate(labels):
ax.text(
x_values[pos],
y_modelled_demand[pos],
txt,
horizontalalignment="right",
verticalalignment="top",
fontsize=1)
font_additional_info = plotting_styles.font_info(size=4)
title_info = ('R_2: {}, std_%: {} (GWh {}), av_diff_%: {} median_abs_dev: {}'.format(
round(r_value, 2),
round(std_dev_p, 2),
round(std_dev_abs, 2),
round(av_deviation_real_modelled, 2),
round(median_absolute_deviation, 2)))
plt.title(
title_info,
loc='left',
fontdict=font_additional_info)
plt.xlabel("UK regions (excluding northern ireland)")
plt.ylabel("{} [GWh]".format(fueltype_str))
# --------
# Legend
# --------
plt.legend(
prop={
'family': 'arial',
'size': 8},
frameon=False)
# Tight layout
plt.margins(x=0)
plt.tight_layout()
plt.savefig(fig_name)
if plotshow:
plt.show()
else:
plt.close()
def spatial_validation_multiple(
reg_coord,
input_data,
regions,
fueltype_str,
fig_name,
label_points=False,
plotshow=False
):
"""Compare gas/elec demand for LADs
Arguments
----------
lad_infos_shapefile : dict
Infos of shapefile (dbf / csv)
ed_fueltype_regs_yh : object
Regional fuel Given as GWh (?)
subnational_real : dict
        for electricity: Sub-national electricity demand given as GWh
Note
-----
SOURCE OF LADS:
    - Data for Northern Ireland is not included here, although it is in the BEIS dataset.
"""
logging.debug("... Validation of spatial disaggregation")
color_list = ['firebrick', 'darkseagreen']
label_list = ['domestic', 'non_domestic']
# -------------------------------------
# Plot
# -------------------------------------
fig = plt.figure(
figsize=basic_plot_functions.cm2inch(9, 8)) #width, height
ax = fig.add_subplot(1, 1, 1)
cnt_color = 0
for i in input_data:
subnational_modelled = i[0]
subnational_real = i[1]
result_dict = {}
result_dict['real_demand'] = {}
result_dict['modelled_demand'] = {}
# -------------------------------------------
# Match ECUK sub-regional demand with geocode
# -------------------------------------------
for region in regions:
for reg_geocode in reg_coord:
if reg_geocode == region:
try:
                        # Test whether data is provided for this LAD, otherwise ignore
if subnational_real[reg_geocode] == 0:
pass
else:
# --Sub Regional Electricity demand (as GWh)
result_dict['real_demand'][reg_geocode] = subnational_real[reg_geocode]
result_dict['modelled_demand'][reg_geocode] = subnational_modelled[reg_geocode]
except KeyError:
logging.debug(
"Sub-national spatial validation: No fuel for region %s", reg_geocode)
# --------------------
# Calculate statistics
# --------------------
diff_real_modelled_p = []
diff_real_modelled_abs = []
y_real_demand = []
y_modelled_demand = []
# -----------------
# Sort results according to size
# -----------------
sorted_dict_real = sorted(
result_dict['real_demand'].items(),
key=operator.itemgetter(1))
#for reg_geocode in regions:
for reg_geocode, _ in sorted_dict_real:
# Test if real and modelled data are both available
try:
real = result_dict['real_demand'][reg_geocode]
modelled = result_dict['modelled_demand'][reg_geocode]
diff_real_modelled_p.append(abs(100 - ((100 / real) * modelled))) # Average abs deviation
diff_real_modelled_abs.append(real - modelled)
y_real_demand.append(real)
y_modelled_demand.append(modelled)
except KeyError:
pass
        # Calculate the average deviation between real and modelled
av_deviation_real_modelled = np.average(diff_real_modelled_p) # average deviation
median_absolute_deviation = np.median(diff_real_modelled_p) # median deviation
# Calculate standard deviation
std_dev_p = np.std(diff_real_modelled_p) # Given as percent
std_dev_abs = np.std(diff_real_modelled_abs) # Given as energy unit
x_values = np.arange(0, len(y_real_demand), 1)
labels = []
for sorted_region in sorted_dict_real:
            if sorted_region[0] in result_dict['modelled_demand']:
geocode_lad = sorted_region[0]
logging.debug(
"validation %s LAD %s: real: %s modelled: %s modelled percentage: %s (%sp diff)",
fueltype_str,
geocode_lad,
round(result_dict['real_demand'][geocode_lad], 4),
round(result_dict['modelled_demand'][geocode_lad], 4),
round(100 / result_dict['real_demand'][geocode_lad] * result_dict['modelled_demand'][geocode_lad], 4),
round(100 - (100 / result_dict['real_demand'][geocode_lad] * result_dict['modelled_demand'][geocode_lad]), 4))
labels.append(geocode_lad)
# Calculate r_squared
_slope, _intercept, r_value, _p_value, _std_err = stats.linregress(
y_real_demand,
y_modelled_demand)
# --------
# Axis
# --------
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off') # labels along the bottom edge are off
# ----------------------------------------------
# Plot
# ----------------------------------------------
plt.plot(
x_values,
y_real_demand,
linestyle='None',
marker='o',
alpha=0.7,
markersize=1.6,
fillstyle='full',
markerfacecolor='grey',
markeredgewidth=0.2,
color=color_list[cnt_color],
markeredgecolor=color_list[cnt_color],
label='actual')
plt.plot(
x_values,
y_modelled_demand,
marker='o',
linestyle='None',
markersize=1.6,
alpha=0.7,
markerfacecolor='white',
fillstyle='none',
markeredgewidth=0.5,
markeredgecolor=color_list[cnt_color],
color=color_list[cnt_color],
label='model' + label_list[cnt_color])
# -----------
# Labelling
# -----------
if label_points:
for pos, txt in enumerate(labels):
ax.text(
x_values[pos],
y_modelled_demand[pos],
txt,
horizontalalignment="right",
verticalalignment="top",
fontsize=1)
font_additional_info = plotting_styles.font_info(size=3, color=color_list[cnt_color])
title_info = ('R_2: {}, std_%: {} (GWh {}), av_diff_%: {} median_abs_dev: {}'.format(
round(r_value, 2),
round(std_dev_p, 2),
round(std_dev_abs, 2),
round(av_deviation_real_modelled, 2),
round(median_absolute_deviation, 2)))
plt.text(
0.4,
0.9 - cnt_color/10,
title_info,
ha='center',
va='center',
transform=ax.transAxes,
fontdict=font_additional_info)
        cnt_color += 1
plt.xlabel("UK regions (excluding northern ireland)")
plt.ylabel("{} [GWh]".format(fueltype_str))
plt.ylim(ymin=0)
# --------
# Legend
# --------
plt.legend(
prop={
'family': 'arial',
'size': 6},
frameon=False)
# Tight layout
plt.margins(x=0)
plt.tight_layout()
plt.savefig(fig_name)
if plotshow:
plt.show()
else:
plt.close()
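# Hedged usage sketch (added for illustration, not part of the original module):
# how spatial_validation_multiple() is typically called. The dict names below
# (LAD geocode -> GWh mappings) are assumptions standing in for the real inputs.
#
#     input_data = [
#         (modelled_domestic_gwh, real_domestic_gwh),
#         (modelled_non_domestic_gwh, real_non_domestic_gwh)]
#     spatial_validation_multiple(
#         reg_coord=lad_coordinates,
#         input_data=input_data,
#         regions=list(lad_coordinates.keys()),
#         fueltype_str='elec',
#         fig_name='validation_elec_multiple.pdf',
#         label_points=False,
#         plotshow=False)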
| nismod/energy_demand | energy_demand/validation/lad_validation.py | Python | mit | 33,133 | 0.003501 |
# Copyright 2007-2019 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Define tools for interacting with the build system and a fake build system for development."""
from functools import wraps
from threading import Lock
import logging
import os
import time
import typing
import backoff
import koji
if typing.TYPE_CHECKING: # pragma: no cover
from bodhi.server.config import BodhiConfig # noqa: 401
log = logging.getLogger('bodhi')
_buildsystem = None
_buildsystem_login_lock = Lock()
# URL of the koji hub
_koji_hub = None
def multicall_enabled(func: typing.Callable[..., typing.Any]) -> typing.Callable[..., typing.Any]:
"""
Decorate the given callable to enable multicall handling.
This is used by DevBuildsys methods.
Args:
func: The function to wrap.
Returns:
A wrapped version of func.
"""
@wraps(func)
def wrapper(self, *args, **kwargs) -> typing.Any:
"""
If multicall is enabled, store the results from func on self.
If multicall is not enabled, just call func and return its results as per usual.
"""
if not self.multicall:
return func(self, *args, **kwargs)
# disable multicall during execution, so that inner func calls to other
# methods don't append their results as well
self._multicall = False
result = func(self, *args, **kwargs)
self.multicall_result.append([result])
self._multicall = True
return wrapper
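# Illustrative note (assumption, not original code): with multicall switched on,
# the wrapped DevBuildsys methods only queue their return values and multiCall()
# drains the queue, mimicking koji's multicall protocol, e.g.:
#
#     buildsys = DevBuildsys()
#     buildsys.multicall = True
#     buildsys.tagBuild('f17-updates-testing', 'TurboGears-1.0.2.2-2.fc17')
#     buildsys.getBuild('TurboGears-1.0.2.2-2.fc17')
#     results = buildsys.multiCall()   # [[None], [{...build info...}]]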
class DevBuildsys:
"""A dummy buildsystem instance used during development and testing."""
_side_tag_data = [{'id': 1234, 'name': 'f17-build-side-1234'},
{'id': 7777, 'name': 'f17-build-side-7777'}]
__untag__ = [] # type: typing.List[typing.Tuple[str, str]]
__moved__ = [] # type: typing.List[typing.Tuple[str, str, str]]
__added__ = [] # type: typing.List[typing.Tuple[str, str]]
__tagged__ = {} # type: typing.Mapping[str, typing.List[str]]
__rpms__ = [] # type: typing.List[typing.Dict[str, object]]
__tags__ = [] # type: typing.List[typing.Tuple[str, typing.Mapping[str, typing.Any]]]
__side_tags__ = _side_tag_data # type: typing.List[typing.Dict[str, object]]
__removed_side_tags__ = [] # type: typing.List[typing.Dict[str, object]]
_build_data = {'build_id': 16058,
'completion_time': '2007-08-24 23:26:10.890319',
'completion_ts': 1187997970,
'creation_event_id': 151517,
'creation_time': '2007-08-24 19:38:29.422344',
'extra': None,
'epoch': None,
'owner_id': 388,
'owner_name': 'lmacken',
'package_id': 8,
'state': 1,
'tag_id': 19,
'task_id': 127621}
def __init__(self):
"""Initialize the DevBuildsys."""
self._multicall = False
self.multicall_result = []
@property
def _side_tag_ids_names(self):
return {id_or_name
for taginfo in self._side_tag_data
for id_or_name in (taginfo['id'], taginfo['name'])}
@property
def multicall(self) -> bool:
"""
Return the value of self._multicall.
Returns:
object: The value of self._multicall.
"""
return self._multicall
@multicall.setter
def multicall(self, value: bool):
"""
Set the _multicall attribute to the given value.
Args:
value: The value to set the _multicall attribute to.
"""
self._multicall = value
self.multicall_result = []
@classmethod
def clear(cls):
"""Clear the state of the class variables."""
cls.__untag__ = []
cls.__moved__ = []
cls.__added__ = []
cls.__tagged__ = {}
cls.__rpms__ = []
cls.__tags__ = []
cls.__side_tags__ = list(cls._side_tag_data)
def multiCall(self):
"""Emulate Koji's multiCall."""
result = self.multicall_result
self.multicall = False
return result
def moveBuild(self, from_tag: str, to_tag: str, build: str, *args, **kw):
"""Emulate Koji's moveBuild."""
if to_tag is None:
raise RuntimeError('Attempt to tag {} with None.'.format(build))
log.debug("moveBuild(%s, %s, %s)" % (from_tag, to_tag, build))
DevBuildsys.__moved__.append((from_tag, to_tag, build))
@multicall_enabled
def tagBuild(self, tag: str, build: str, *args, **kw):
"""Emulate Koji's tagBuild."""
if tag is None:
raise RuntimeError('Attempt to tag {} with None.'.format(build))
log.debug("tagBuild(%s, %s)" % (tag, build))
DevBuildsys.__added__.append((tag, build))
@multicall_enabled
def untagBuild(self, tag: str, build: str, *args, **kw):
"""Emulate Koji's untagBuild."""
if tag is None:
raise RuntimeError('Attempt to untag {} with None.'.format(build))
log.debug("untagBuild(%s, %s)" % (tag, build))
DevBuildsys.__untag__.append((tag, build))
def ssl_login(self, *args, **kw):
"""Emulate Koji's ssl_login."""
log.debug("ssl_login(%s, %s)" % (args, kw))
def taskFinished(self, task: int) -> bool:
"""Emulate Koji's taskFinished."""
return True
def getTaskInfo(self, task: int) -> typing.Mapping[str, int]:
"""Emulate Koji's getTaskInfo."""
return {'state': koji.TASK_STATES['CLOSED']}
def getTaskRequest(self, task_id: int) -> typing.List[typing.Union[str, typing.Mapping]]:
"""Emulate Koji's getTaskRequest."""
return [
'git://pkgs.fedoraproject.org/rpms/bodhi?#2e994ca8b3296e62e8b0aadee1c5c0649559625a',
'f17-candidate', {}]
def listPackages(self) -> typing.List[typing.Mapping[str, typing.Union[int, str]]]:
"""Emulate Koji's listPackages."""
return [
{'package_id': 2625, 'package_name': 'nethack'},
]
@multicall_enabled
def getBuild(self, build='TurboGears-1.0.2.2-2.fc17', other=False, testing=False):
"""Emulate Koji's getBuild."""
# needed to test against non-existent builds
if 'youdontknowme' in build:
return None
if 'gnome-backgrounds-3.0-1.fc17' in build:
return {'name': 'gnome-backgrounds',
'nvr': 'gnome-backgrounds-3.0-1.fc17',
'package_name': 'gnome-backgrounds',
'release': '1.fc17',
'tag_name': 'f17-build-side-7777',
'version': '3.0'}
theid = 16058
if other and not testing:
theid = 16059
elif other and testing:
theid = 16060
data = self._build_data.copy()
data['id'] = theid
if 'noowner' in build:
del data['owner_name']
name, version, release = build.rsplit("-", 2)
release_tokens = release.split(".")
for token in release_tokens:
# Starting to hardcode some dev buildsys bits for docker.
# See https://github.com/fedora-infra/bodhi/pull/1543
if token.endswith("container") or token.endswith("flatpak"):
fedora_release = "f" + (token
.replace("fc", "")
.replace("flatpak", "")
.replace("container", ""))
tag = "%s-updates-testing" % fedora_release
format_data = {
'registry': 'candidate-registry.fedoraproject.org',
'hash': 'sha256:2bd64a888...',
'version': version,
'release': release
}
if token.endswith("flatpak"):
format_data['repository'] = name
else:
tag = "f%s-updates-testing" % token.replace("fc", "").replace("container", "")
format_data['repository'] = "{}/{}".format(fedora_release, name)
data['extra'] = {
'container_koji_task_id': 19708268,
'image': {
'index': {
'pull': ['{registry}/{repository}@sha256:{hash}'
.format(**format_data),
'{registry}/{repository}:{version}-{release}'
.format(**format_data)],
}
},
}
if token.endswith("flatpak"):
data['extra']['image']['flatpak'] = True
break
# Hardcoding for modules in the dev buildsys
if token.startswith("2017"):
tag = "f27M-updates-testing"
data['extra'] = {
'typeinfo': {'module': {'more': 'mbs stuff goes here'}}
}
break
if token.startswith("fc"):
if testing:
tag = "f%s-updates-testing" % token.replace("fc", "")
break
else:
tag = "f%s-updates-candidate" % token.replace("fc", "")
break
if token.startswith("el"):
tag = "dist-%sE-epel-testing-candidate" % token.replace("el", "")
break
else:
raise ValueError("Couldn't determine dist for build '%s'" % build)
if other:
if testing:
release_tokens[0] = str(int(release_tokens[0]) + 2)
else:
release_tokens[0] = str(int(release_tokens[0]) + 1)
release = ".".join(release_tokens)
build = "%s-%s-%s" % (name, version, release)
data.update({'name': name,
'nvr': build,
'package_name': name,
'release': release,
'tag_name': tag,
'version': version})
if 'testmissingnvr' in build:
del data['nvr']
return data
def listBuildRPMs(self, id: int, *args, **kw) -> typing.List[typing.Dict[str, object]]:
"""Emulate Koji's listBuildRPMs."""
rpms = [{'arch': 'src',
'build_id': 6475,
'buildroot_id': 1883,
'buildtime': 1178868422,
'epoch': None,
'id': 62330,
'name': 'TurboGears',
'nvr': 'TurboGears-1.0.2.2-2.fc17',
'payloadhash': '6787febe92434a9be2a8f309d0e2014e',
'release': '2.fc17',
'size': 761742,
'version': '1.0.2.2'},
{'arch': 'noarch',
'build_id': 6475,
'buildroot_id': 1883,
'buildtime': 1178868537,
'epoch': None,
'id': 62331,
'name': 'TurboGears',
'nvr': 'TurboGears-1.0.2.2-2.fc17',
'payloadhash': 'f3ec9bdce453816f94283a15a47cb952',
'release': '2.fc17',
'size': 1993385,
'version': '1.0.2.2'},
]
if id == 16059: # for updateinfo.xml tests
rpms[0]['nvr'] = rpms[1]['nvr'] = 'TurboGears-1.0.2.2-3.fc17'
rpms[0]['release'] = rpms[1]['release'] = '3.fc17'
rpms += DevBuildsys.__rpms__
return rpms
def listTags(self, build: str, *args, **kw) -> typing.List[typing.Dict[str, object]]:
"""Emulate Koji's listTags."""
if 'el5' in build or 'el6' in build:
release = build.split('.')[-1].replace('el', '')
result = [
{'arches': 'i386 x86_64 ppc ppc64', 'id': 10, 'locked': True,
'name': 'dist-%sE-epel-testing-candidate' % release, 'perm': None,
'perm_id': None},
{'arches': 'i386 x86_64 ppc ppc64', 'id': 10, 'locked': True,
'name': 'dist-%sE-epel-testing-candidate' % release, 'perm': None,
'perm_id': None},
{'arches': 'i386 x86_64 ppc ppc64', 'id': 5, 'locked': True,
'name': 'dist-%sE-epel' % release, 'perm': None, 'perm_id': None}]
elif 'el7' in build:
release = build.split('.')[-1].replace('el', 'epel')
result = [
{'arches': 'i386 x86_64 ppc ppc64', 'id': 10, 'locked': True,
'name': '%s-testing-candidate' % release, 'perm': None, 'perm_id': None},
{'arches': 'i386 x86_64 ppc ppc64', 'id': 5, 'locked': True, 'name': '%s' % release,
'perm': None, 'perm_id': None},
{'arches': 'i386 x86_64 ppc ppc64', 'id': 5, 'locked': True,
'name': '%s-testing' % release, 'perm': None, 'perm_id': None}]
elif '-master-' in build or build.startswith(('nodejs-6-', 'nodejs-8-', 'nodejs-9-')):
# Hardcoding for modules in the dev buildsys
result = [
{'arches': 'x86_64', 'id': 15, 'locked': True,
'name': 'f27M-updates-candidate'},
{'arches': 'x86_64', 'id': 16, 'locked': True,
'name': 'f27M-updates-testing'},
{'arches': 'x86_64', 'id': 17, 'locked': True,
'name': 'f27M'},
]
elif build.endswith("container"):
result = [
{'arches': 'x86_64', 'id': 15, 'locked': True,
'name': 'f28C-updates-candidate'},
{'arches': 'x86_64', 'id': 16, 'locked': True,
'name': 'f28C-updates-testing'},
{'arches': 'x86_64', 'id': 17, 'locked': True,
'name': 'f28C'},
]
elif 'flatpak' in build:
result = [
{'arches': 'x86_64', 'id': 15, 'locked': True,
'name': 'f28F-updates-candidate'},
{'arches': 'x86_64', 'id': 16, 'locked': True,
'name': 'f28F-updates-testing'},
{'arches': 'x86_64', 'id': 17, 'locked': True,
'name': 'f28F'},
]
else:
release = build.split('.')[-1].replace('fc', 'f').replace('~bootstrap', '')
result = [
{'arches': 'i386 x86_64 ppc ppc64', 'id': 10, 'locked': True,
'name': '%s-updates-candidate' % release, 'perm': None, 'perm_id': None},
{'arches': 'i386 x86_64 ppc ppc64', 'id': 5, 'locked': True, 'name': '%s' % release,
'perm': None, 'perm_id': None},
{'arches': 'i386 x86_64 ppc ppc64', 'id': 5, 'locked': True,
'name': '%s-updates-testing' % release, 'perm': None, 'perm_id': None}]
if build in DevBuildsys.__tagged__:
for tag in DevBuildsys.__tagged__[build]:
result += [{'name': tag}]
return result
@multicall_enabled
def listTagged(self, tag: str, *args, **kw) -> typing.List[typing.Any]:
"""List updates tagged with the given tag."""
latest = kw.get('latest', False)
if tag in self._side_tag_ids_names:
return [self.getBuild(build="gnome-backgrounds-3.0-1.fc17")]
builds = []
all_builds = [self.getBuild(),
self.getBuild(other=True),
self.getBuild(other=True, testing=True)]
if latest:
# Delete all older builds which aren't the latest for their tag.
# Knowing which these are is simpler than trying to rpmvercmp.
del all_builds[0]
for build in all_builds:
if build['nvr'] in self.__untag__:
log.debug('Pruning koji build %s' % build['nvr'])
continue
elif build['tag_name'] == tag:
builds.append(build)
for build in DevBuildsys.__tagged__:
for tag_ in DevBuildsys.__tagged__[build]:
if tag_ == tag:
builds.append(self.getBuild(build))
return builds
def getLatestBuilds(self, *args, **kw) -> typing.List[typing.Any]:
"""
Return a list of the output from self.getBuild().
Returns:
list: A list of the latest builds from getBuild().
"""
return [self.getBuild()]
@multicall_enabled
def getTag(self, taginfo, **kw):
"""
Retrieve the given tag from koji.
Args:
taginfo (int or str): The tag you want info about.
strict (bool): If True, raise an Exception if epel tags are queried. Defaults to False.
Returns:
dict or None: A dictionary of tag information, or None if epel is requested and strict
is False.
Raises:
koji.GenericError: If strict is True and epel is requested.
"""
if isinstance(taginfo, int):
taginfo = "f%d" % taginfo
if taginfo.startswith("epel"):
if kw.get("strict", False):
raise koji.GenericError("Invalid tagInfo: '%s'" % taginfo)
else:
return None
        # These tags need to be created
if taginfo in ["f32-build-side-1234-signing-pending",
"f32-build-side-1234-testing-pending"]:
return None
# emulate a side-tag response
if taginfo in self._side_tag_ids_names:
for sidetag in self.__side_tags__:
if taginfo in (sidetag['id'], sidetag['name']):
return {'maven_support': False, 'locked': False, 'name': sidetag['name'],
'extra': {'sidetag_user': 'dudemcpants', 'sidetag': True},
'perm': None, 'perm_id': None, 'arches': None,
'maven_include_all': False, 'id': sidetag['id']}
if kw.get('strict'):
raise koji.GenericError("Invalid tagInfo: '%s'" % taginfo)
else:
return None
return {'maven_support': False, 'locked': False, 'name': taginfo,
'extra': {}, 'perm': None, 'id': 246, 'arches': None,
'maven_include_all': False, 'perm_id': None}
def getFullInheritance(self, taginfo, **kw):
"""
Return a tag inheritance.
Args:
            taginfo (int or str): The tag; it does not affect the output.
Returns:
list: A list of dicts of tag information
"""
return [{'intransitive': False, 'name': 'f17-build', 'pkg_filter': '', 'priority': 0,
'parent_id': 6448, 'maxdepth': None, 'noconfig': False, 'child_id': 7715,
'nextdepth': None, 'filter': [], 'currdepth': 1},
{'intransitive': False, 'name': 'f17-override', 'pkg_filter': '', 'priority': 0,
'parent_id': 6447, 'maxdepth': None, 'noconfig': False, 'child_id': 6448,
'nextdepth': None, 'filter': [], 'currdepth': 2},
{'intransitive': False, 'name': 'f17-updates', 'pkg_filter': '', 'priority': 0,
'parent_id': 6441, 'maxdepth': None, 'noconfig': False, 'child_id': 6447,
'nextdepth': None, 'filter': [], 'currdepth': 3},
{'intransitive': False, 'name': 'f17', 'pkg_filter': '', 'priority': 0,
'parent_id': 6438, 'maxdepth': None, 'noconfig': False, 'child_id': 6441,
'nextdepth': None, 'filter': [], 'currdepth': 4}]
def listSideTags(self, **kw):
"""Return a list of side-tags."""
return self.__side_tags__
def createTag(self, tag: str, **opts):
"""Emulate tag adding."""
if 'parent' not in opts:
raise ValueError('No parent in tag options')
        for tag_name, _taginfo in self.__tags__:
            if tag_name == tag:
                raise ValueError('Tag %s already exists' % tag)
opts['locked'] = False
opts['maven_support'] = False
opts['name'] = tag
opts['perm'] = 'admin'
opts['arches'] = None
opts['maven_include_all'] = False
opts['perm_id'] = 1
self.__tags__.append((tag, opts))
def editTag2(self, *args, **kw):
"""Edit a tag."""
pass
def deleteTag(self, tagid: typing.Union[str, int]):
"""Emulate tag deletion."""
if isinstance(tagid, str):
for tid, tinfo in self.__tags__:
if tagid == tid:
self.__tags__.remove((tid, tinfo))
return
else:
del self.__tags__[tagid]
def getRPMHeaders(self, rpmID: str,
headers: typing.Any) -> typing.Union[typing.Mapping[str, str], None]:
"""
Return headers for the given RPM.
Args:
rpmID: The RPM you want headers for.
headers: Unused.
Returns:
A dictionary of RPM headers, or None if the rpmID is not found.
"""
if rpmID == 'raise-exception.src':
raise Exception
elif rpmID == 'do-not-find-anything.src':
return None
else:
headers = {
'description': (
"The libseccomp library provides an easy to use interface to the "
"Linux Kernel's\nsyscall filtering mechanism, seccomp. The "
"libseccomp API allows an application\nto specify which "
"syscalls, and optionally which syscall arguments, the\n"
"application is allowed to execute, all of which are "
"enforced by the Linux\nKernel."),
'url': 'http://libseccomp.sourceforge.net',
'changelogname': [
'Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.1.0-1',
'Paul Moore <pmoore@redhat.com> - 2.1.0-0',
'Paul Moore <pmoore@redhat.com> - 2.0.0-0',
'Paul Moore <pmoore@redhat.com> - 1.0.1-0',
'Paul Moore <pmoore@redhat.com> - 1.0.0-0',
'Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 0.1.0-2',
'Paul Moore <pmoore@redhat.com> - 0.1.0-1',
'Paul Moore <pmoore@redhat.com> - 0.1.0-0'],
'summary': 'Enhanced seccomp library',
'version': '2.1.0',
'changelogtime': [
1375531200, 1370952000, 1359374400, 1352808000, 1343736000,
1342699200, 1341921600, 1339502400],
'changelogtext': [
'- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild',
'- New upstream version\n- Added support for the ARM architecture\n'
'- Added the scmp_sys_resolver tool',
'- New upstream version',
'- New upstream version with several important fixes',
'- New upstream version\n- Remove verbose build patch as it is no '
'longer needed\n- Enable _smp_mflags during build stage',
'- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild',
'- Limit package to x86/x86_64 platforms (RHBZ #837888)',
'- Initial version'],
'release': '1.fc20',
'name': 'libseccomp'
}
if rpmID == "TurboGears-2.0.0.0-1.fc17.src":
headers['changelogname'].insert(0, 'Randy Barlow <bowlofeggs@fp.o> - 2.2.0-1')
headers['changelogtext'].insert(0, '- Added some bowlofeggs charm.')
headers['changelogtime'].insert(0, 1375531201)
elif rpmID == 'TurboGears-1.9.1-1.fc17.src':
headers['changelogtext'] = []
elif rpmID == 'TurboGears-1.9.1-42.fc17.src':
# Make sure only a single changelog entry is present
headers['changelogname'] = ['Randy Barlow <bowlofeggs@fp.o> - 1.9.1-42']
headers['changelogtext'] = ["- Hope I didn't break anything!"]
headers['changelogtime'] = [1375531200]
return headers
@backoff.on_exception(backoff.expo, koji.AuthError, max_time=600)
def koji_login(config: 'BodhiConfig', authenticate: bool) -> koji.ClientSession:
"""
Login to Koji and return the session.
Args:
config: Bodhi's configuration dictionary.
authenticate: If True, establish an authenticated client session.
Returns:
An authenticated Koji ClientSession that is ready to use.
"""
koji_options = {
'krb_rdns': False,
'max_retries': 30,
'retry_interval': 10,
'offline_retry': True,
'offline_retry_interval': 10,
'anon_retry': True,
}
koji_client = koji.ClientSession(_koji_hub, koji_options)
if authenticate and not koji_client.gssapi_login(**get_krb_conf(config)):
log.error('Koji gssapi_login failed')
return koji_client
def get_krb_conf(config: 'BodhiConfig') -> typing.Mapping[str, str]:
"""
Return arguments for gssapi_login.
Args:
config: Bodhi's configuration dictionary.
Returns:
A dictionary containing three keys:
principal: The kerberos principal to use.
keytab: The kerberos keytab to use.
ccache: The kerberos ccache to use.
"""
principal = config.get('krb_principal')
keytab = config.get('krb_keytab')
ccache = config.get('krb_ccache')
args = {}
if principal:
args['principal'] = principal
if keytab:
args['keytab'] = keytab
if ccache:
ccache = ccache.replace('%{uid}', str(os.geteuid()))
args['ccache'] = ccache
return args
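# Illustrative sketch (assumption): the kind of mapping get_krb_conf() yields when
# Bodhi's config carries kerberos settings. The concrete values are made up.
#
#     config = {'krb_principal': 'bodhi@EXAMPLE.COM',
#               'krb_keytab': '/etc/krb5.bodhi.keytab',
#               'krb_ccache': '/var/tmp/krb5cc_%{uid}'}
#     get_krb_conf(config)
#     # -> {'principal': 'bodhi@EXAMPLE.COM',
#     #     'keytab': '/etc/krb5.bodhi.keytab',
#     #     'ccache': '/var/tmp/krb5cc_1000'}  # '%{uid}' expanded via os.geteuid()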
def get_session() -> typing.Union[koji.ClientSession, DevBuildsys]:
"""
Get a new buildsystem instance.
Returns:
A buildsystem client instance.
Raises:
RuntimeError: If the build system has not been initialized. See setup_buildsystem().
"""
global _buildsystem
if _buildsystem is None:
raise RuntimeError('Buildsys needs to be setup')
with _buildsystem_login_lock:
return _buildsystem()
def teardown_buildsystem():
"""Tear down the build system."""
global _buildsystem
_buildsystem = None
DevBuildsys.clear()
def setup_buildsystem(settings: 'BodhiConfig', authenticate: bool = True):
"""
Initialize the buildsystem client.
Args:
settings: Bodhi's config.
authenticate: If True, establish an authenticated Koji session. Defaults to True.
Raises:
ValueError: If the buildsystem is configured to an invalid value.
"""
global _buildsystem, _koji_hub
if _buildsystem:
return
_koji_hub = settings.get('koji_hub')
buildsys = settings.get('buildsystem')
if buildsys == 'koji':
log.debug('Using Koji Buildsystem')
def get_koji_login():
"""Call koji_login with settings and return the result."""
return koji_login(config=settings, authenticate=authenticate)
_buildsystem = get_koji_login
elif buildsys in ('dev', 'dummy', None):
log.debug('Using DevBuildsys')
_buildsystem = DevBuildsys
else:
raise ValueError('Buildsys %s not known' % buildsys)
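# Hedged usage sketch (not original code): typical initialization sequence for this
# module; the plain dict stands in for Bodhi's config object.
#
#     setup_buildsystem({'buildsystem': 'dev', 'koji_hub': None})
#     session = get_session()          # a DevBuildsys instance in this configuration
#     session.getBuild('TurboGears-1.0.2.2-2.fc17')
#     teardown_buildsystem()           # reset global state, e.g. between tests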
def wait_for_tasks(
tasks: typing.List[typing.Any],
session: typing.Union[koji.ClientSession, None] = None,
sleep: int = 300) -> typing.List[typing.Any]:
"""
Wait for a list of koji tasks to complete.
Args:
tasks: The return value of Koji's multiCall().
session: A Koji client session to use. If not provided, the
function will acquire its own session.
sleep: How long to sleep between polls on Koji when waiting for tasks to complete.
Returns:
A list of failed tasks. An empty list indicates that all tasks completed successfully.
"""
log.debug("Waiting for %d tasks to complete: %s" % (len(tasks), tasks))
failed_tasks = []
if not session:
session = get_session()
for task in tasks:
if not task:
log.debug("Skipping task: %s" % task)
continue
while not session.taskFinished(task):
time.sleep(sleep)
task_info = session.getTaskInfo(task)
if task_info['state'] != koji.TASK_STATES['CLOSED']:
log.error("Koji task %d failed" % task)
failed_tasks.append(task)
log.debug("%d tasks completed successfully, %d tasks failed." % (
len(tasks) - len(failed_tasks), len(failed_tasks)))
return failed_tasks
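# Hedged example (assumption, not part of the module): pairing koji's multicall
# with wait_for_tasks(). Build NVRs, tag names and the sleep value are illustrative.
#
#     session = get_session()
#     session.multicall = True
#     for nvr in ('pkg-1.0-1.fc30', 'pkg2-2.0-1.fc30'):
#         session.tagBuild('f30-updates-testing', nvr)
#     tasks = [result[0] for result in session.multiCall()]
#     failed = wait_for_tasks(tasks, session=session, sleep=30)
#     if failed:
#         log.error('%d koji tasks failed', len(failed))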
| fedora-infra/bodhi | bodhi-server/bodhi/server/buildsys.py | Python | gpl-2.0 | 29,649 | 0.001889 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for AMT ManagementInterface
"""
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.amt import common as amt_common
from ironic.drivers.modules.amt import management as amt_mgmt
from ironic.drivers.modules.amt import resource_uris
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_amt_info()
CONF = cfg.CONF
@mock.patch.object(amt_common, 'pywsman', spec_set=mock_specs.PYWSMAN_SPEC)
class AMTManagementInternalMethodsTestCase(db_base.DbTestCase):
    def setUp(self):
        super(AMTManagementInternalMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_amt')
self.node = obj_utils.create_test_node(self.context,
driver='fake_amt',
driver_info=INFO_DICT)
def test__set_boot_device_order(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
amt_mgmt._set_boot_device_order(self.node, device)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'ChangeBootOrder', mock.ANY)
def test__set_boot_device_order_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
self.assertRaises(exception.AMTFailure,
amt_mgmt._set_boot_device_order, self.node, device)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'ChangeBootOrder', mock.ANY)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
self.assertRaises(exception.AMTConnectFailure,
amt_mgmt._set_boot_device_order, self.node, device)
def test__enable_boot_config(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
amt_mgmt._enable_boot_config(self.node)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'SetBootConfigRole', mock.ANY)
def test__enable_boot_config_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
self.assertRaises(exception.AMTFailure,
amt_mgmt._enable_boot_config, self.node)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'SetBootConfigRole', mock.ANY)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
self.assertRaises(exception.AMTConnectFailure,
amt_mgmt._enable_boot_config, self.node)
class AMTManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(AMTManagementTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_amt')
self.info = INFO_DICT
self.node = obj_utils.create_test_node(self.context,
driver='fake_amt',
driver_info=self.info)
def test_get_properties(self):
expected = amt_common.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(amt_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(amt_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate_fail(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_drvinfo.side_effect = iter(
[exception.InvalidParameterValue('x')])
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
def test_get_supported_boot_devices(self):
expected = [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM]
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(
sorted(expected),
sorted(task.driver.management.
get_supported_boot_devices(task)))
def test_set_boot_device_one_time(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, 'pxe')
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertFalse(
task.node.driver_internal_info["amt_boot_persistent"])
def test_set_boot_device_persistent(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, 'pxe',
persistent=True)
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
def test_set_boot_device_fail(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
task, 'fake-device')
@mock.patch.object(amt_mgmt, '_enable_boot_config', spec_set=True,
autospec=True)
@mock.patch.object(amt_mgmt, '_set_boot_device_order', spec_set=True,
autospec=True)
def test_ensure_next_boot_device_one_time(self, mock_sbdo, mock_ebc):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
device = boot_devices.PXE
task.node.driver_internal_info['amt_boot_device'] = 'pxe'
task.driver.management.ensure_next_boot_device(task.node, device)
self.assertEqual('disk',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
mock_sbdo.assert_called_once_with(task.node, device)
mock_ebc.assert_called_once_with(task.node)
@mock.patch.object(amt_mgmt, '_enable_boot_config', spec_set=True,
autospec=True)
@mock.patch.object(amt_mgmt, '_set_boot_device_order', spec_set=True,
autospec=True)
def test_ensure_next_boot_device_persistent(self, mock_sbdo, mock_ebc):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
device = boot_devices.PXE
task.node.driver_internal_info['amt_boot_device'] = 'pxe'
task.node.driver_internal_info['amt_boot_persistent'] = True
task.driver.management.ensure_next_boot_device(task.node, device)
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
mock_sbdo.assert_called_once_with(task.node, device)
mock_ebc.assert_called_once_with(task.node)
def test_get_boot_device(self):
expected = {'boot_device': boot_devices.DISK, 'persistent': True}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected,
task.driver.management.get_boot_device(task))
def test_get_sensor_data(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(NotImplementedError,
task.driver.management.get_sensors_data,
task)
| hpproliant/ironic | ironic/tests/unit/drivers/modules/amt/test_management.py | Python | apache-2.0 | 10,845 | 0 |
# -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 8
_modified_time = 1399593695.26071
_enable_loop = True
_template_filename = u'/usr/lib/python2.7/site-packages/nikola/data/themes/base/templates/comments_helper_googleplus.tmpl'
_template_uri = u'comments_helper_googleplus.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_form', 'comment_link', 'comment_link_script']
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
# SOURCE LINE 9
__M_writer(u'\n\n')
# SOURCE LINE 14
__M_writer(u'\n\n')
# SOURCE LINE 17
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_form(context,url,title,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer(u'\n<script src="https://apis.google.com/js/plusone.js"></script>\n<div class="g-comments"\n data-href="')
# SOURCE LINE 5
__M_writer(unicode(url))
__M_writer(u'"\n data-first_party_property="BLOGGER"\n data-view_type="FILTERED_POSTMOD">\n</div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link(context,link,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 11
__M_writer(u'\n<div class="g-commentcount" data-href="')
# SOURCE LINE 12
__M_writer(unicode(link))
__M_writer(u'"></div>\n<script src="https://apis.google.com/js/plusone.js"></script>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link_script(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 16
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
| wcmckee/hamiltoncomputerclub.org.nz | static/cache/.mako.tmp/comments_helper_googleplus.tmpl.py | Python | mit | 2,205 | 0.004989 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import operator
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.lru_cache import lru_cache
from six.moves import urllib, zip_longest, zip, range
from typing import Any, List, Dict, Optional, Text
import os
import ujson
def with_language(string, language):
# type: (Text, Text) -> Text
"""
This is an expensive function. If you are using it in a loop, it will
make your code slow.
"""
old_language = translation.get_language()
translation.activate(language)
result = _(string)
translation.activate(old_language)
return result
@lru_cache()
def get_language_list():
# type: () -> List[Dict[str, Any]]
path = os.path.join(settings.STATIC_ROOT, 'locale', 'language_name_map.json')
with open(path, 'r') as reader:
languages = ujson.load(reader)
return languages['name_map']
def get_language_list_for_templates(default_language):
# type: (Text) -> List[Dict[str, Dict[str, str]]]
language_list = [l for l in get_language_list()
if 'percent_translated' not in l or
l['percent_translated'] >= 5.]
formatted_list = []
lang_len = len(language_list)
firsts_end = (lang_len // 2) + operator.mod(lang_len, 2)
firsts = list(range(0, firsts_end))
seconds = list(range(firsts_end, lang_len))
assert len(firsts) + len(seconds) == lang_len
for row in zip_longest(firsts, seconds):
item = {}
for position, ind in zip(['first', 'second'], row):
if ind is None:
continue
lang = language_list[ind]
percent = name = lang['name']
if 'percent_translated' in lang:
percent = u"{} ({}%)".format(name, lang['percent_translated'])
item[position] = {
'name': name,
'code': lang['code'],
'percent': percent,
'selected': True if default_language == lang['code'] else False
}
formatted_list.append(item)
return formatted_list
def get_language_name(code):
# type: (str) -> Optional[Text]
for lang in get_language_list():
if lang['code'] == code:
return lang['name']
return None
def get_available_language_codes():
# type: () -> List[Text]
language_list = get_language_list()
codes = [language['code'] for language in language_list]
return codes
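# Illustrative sketch (assumption): the shape of the data these helpers return,
# based on the language_name_map.json structure loaded above.
#
#     get_language_list()             # [{'code': 'de', 'name': 'Deutsch',
#                                     #   'percent_translated': 97}, ...]
#     get_language_name('de')         # u'Deutsch'
#     get_available_language_codes()  # ['de', 'es', ...]
#     get_language_list_for_templates(u'en')
#     # -> [{'first': {'name': ..., 'code': ..., 'percent': ..., 'selected': ...},
#     #      'second': {...}}, ...]   # two-column layout for the settings page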
| verma-varsha/zulip | zerver/lib/i18n.py | Python | apache-2.0 | 2,570 | 0.002724 |
# -*- coding: UTF-8 -*-
## Copyright 2008-2011 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
import time
from datetime import date
from dateutil import parser as dateparser
from lino import dd
from lino.modlib.sales import utils
from lino.utils.instantiator import Instantiator, i2d
def objects():
Company = dd.resolve_model("contacts.Company")
Customer = dd.resolve_model('sales.Customer')
products = dd.resolve_app('products')
    sales = dd.resolve_app('sales')
    # ``ledger`` is used below for the invoices journal account but was never
    # resolved; assuming the standard Lino ledger app provides it.
    ledger = dd.resolve_app('ledger')
salesrule = Instantiator(sales.SalesRule).build
#customer = Instantiator(Customer).build
imode = Instantiator(sales.InvoicingMode,
"id channel name advance_days journal").build
for c in Company.objects.filter(country_id='BE'):
yield c.contact_ptr.insert_child(Customer)
paymentterm = Instantiator(sales.PaymentTerm,"id name").build
yield paymentterm("pp","Prepayment",days=7)
yield paymentterm("cash","Cash")
yield paymentterm("7","7 days net",days=7)
pt15 = paymentterm("15","15 days net",days=15)
yield pt15
yield paymentterm("30","30 days net",days=30)
shippingmode = Instantiator(sales.ShippingMode,"id name").build
yield shippingmode("ta","take away")
yield shippingmode("rm","regular mail")
#~ for company in Company.objects.all():
#~ yield Customer(company=company)
#~ for person in Person.objects.all():
#~ yield Customer(person=person)
#ORD = journals.get_journal_by_docclass(Order)
#INV = journals.get_journal_by_docclass(Invoice)
ORD = sales.Order.create_journal("ORD",name="Orders",printed_name="Order # %d")
#ORD = journals.get_journal("ORD")
#INV = journals.get_journal("INV")
yield ORD
INV = sales.Invoice.create_journal("INV",\
account=ledger.Account.objects.get(match="4000"),
name="Invoices",printed_name="Invoice # %d")
#~ INV = sales.Invoice.create_journal("INV",account="4000",name="Invoices")
yield INV
imode_e = imode('e','E','By e-mail',2,INV,template='order_invoice.odt',build_method='appyodt')
yield imode_e
imode_p = imode('p','P','By snail mail',10,INV,template='order_invoice.odt',build_method='appyodt')
yield imode_p
yield salesrule(imode='e',shipping_mode="ta",payment_term="7")
#~ Company = resolve_model('contacts.Company')
#Person = resolve_model('contacts.Person')
#company1 = Company.objects.get(name__startswith="Ausdemwald")
#dubois = Person.objects.get(last_name__startswith="Dubois")
furniture = products.ProductCat.objects.get(pk=1) # name="Furniture")
hosting = products.Product.objects.get(pk=5)
#~ order = Instantiator(sales.Order,
#~ "company creation_date start_date cycle imode",
#~ payment_term="30",journal=ORD).build
#~ invoice = Instantiator(sales.Invoice,
#~ "company creation_date imode",
#~ payment_term="30",journal=INV).build
o = ORD.create_document(
customer=Customer.objects.all()[0],
#~ company=Company.objects.get(pk=1),
creation_date=i2d(20080923),start_date=i2d(20080924),
cycle="M",imode=imode_e,
sales_remark="monthly order")
#~ o = order(1,"2008-09-23","2008-09-24","M","e",sales_remark="monthly order")
o.add_item(hosting,1)
yield o
o = ORD.create_document(
customer=Customer.objects.all()[1],
#~ company=Company.objects.get(pk=2),
creation_date=i2d(20080923),start_date=i2d(20080924),
cycle="M",imode=imode_e,
sales_remark="Customer 2 gets 50% discount")
#~ o = order(2,"2008-09-23","2008-09-24","M","e",
#~ sales_remark="Company 2 gets 50% discount")
o.add_item(hosting,1,discount=50)
yield o
utils.make_invoices(make_until=date(2008,10,28))
i = INV.create_document(
customer=Customer.objects.all()[1],
#~ company=Company.objects.get(pk=2),
creation_date=i2d(20081029),
imode=imode_e,
sales_remark="first manual invoice")
#~ i = invoice(2,"2008-10-29","e",
#~ sales_remark="first manual invoice")
i.add_item(1,1)
i.add_item(2,4)
yield i
utils.make_invoices(make_until=date(2009,04,11))
i = INV.create_document(
customer=Customer.objects.all()[2],
#~ company=Company.objects.get(pk=3),
creation_date=i2d(20090411),
imode=imode_e,
sales_remark="second manual invoice")
#~ i = invoice(3,date(2009,04,11),"e",
#~ sales_remark="second manual invoice")
i.add_item(3,1)
i.add_item(4,4)
yield i
#d = '2009-04-12'
#d = '20090412'
d = i2d(20090412)
#d = date(2009,4,12)
#~ o2 = order(4,d,d,"Y","p",sales_remark="yearly order")
o2 = ORD.create_document(
customer=Customer.objects.all()[3],
#~ company=Company.objects.get(pk=4),
creation_date=d,start_date=d,
cycle="Y",imode=imode_p,
sales_remark="yearly order")
o2.add_item(3,1)
o2.add_item(4,4)
#print o2
#o2.save()
yield o2
utils.make_invoices(make_until=d)
#~ i = invoice(4,date(2009,04,13),"e",
#~ sales_remark="third manual invoice with discount")
i = INV.create_document(
customer=Customer.objects.all()[3],
#~ company=Company.objects.get(pk=4),
creation_date=i2d(20090413),
imode=imode_e,
sales_remark="third manual invoice with discount")
i.add_item(3,1,discount=10)
i.add_item(4,4,discount=5)
yield i
utils.make_invoices(make_until=date(2009,05,14))
#~ order = Instantiator(sales.Order,journal=ORD,cycle='M',imode='e',payment_term="15").build
i = 0
for cust in Customer.objects.order_by('id'):
i += 1
#~ for i in range(10):
#for i in range(29):
#~ o = order(
#~ company=i+1,creation_date=date(2009,6,1+i),
#~ sales_remark="range demo #%d" % i)
o = ORD.create_document(
cycle='M',imode=imode_e,payment_term=pt15,
customer=cust,
#~ company=Company.objects.get(pk=i+1),
creation_date=date(2009,6,i),
sales_remark="range demo #%d" % i)
yield o
yield o.add_item(5,1,unit_price=1.7*i)
utils.make_invoices(make_until=date(2009,7,1))
utils.make_invoices(make_until=date(2009,8,1))
utils.make_invoices(make_until=date(2009,10,1))
| MaxTyutyunnikov/lino | obsolete/sales.old/fixtures/demo.py | Python | gpl-3.0 | 7,084 | 0.025692 |
"""Module which contains all spells that check something in a cgf file."""
# --------------------------------------------------------------------------
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, NIF File Format Library and Tools.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
# --------------------------------------------------------------------------
from tempfile import TemporaryFile
from pyffi.formats.cgf import CgfFormat
from pyffi.spells.cgf import CgfSpell
# XXX do something about this...
from pyffi.utils.mathutils import *
class SpellReadWrite(CgfSpell):
"""Like the original read-write spell, but with additional file size
check."""
SPELLNAME = "check_readwrite"
def dataentry(self):
self.toaster.msgblockbegin("writing to temporary file")
f_tmp = TemporaryFile()
try:
total_padding = self.data.write(f_tmp)
# comparing the files will usually be different because blocks may
# have been written back in a different order, so cheaply just compare
# file sizes
self.toaster.msg("comparing file sizes")
self.stream.seek(0, 2)
f_tmp.seek(0, 2)
if self.stream.tell() != f_tmp.tell():
self.toaster.msg("original size: %i" % self.stream.tell())
self.toaster.msg("written size: %i" % f_tmp.tell())
self.toaster.msg("padding: %i" % total_padding)
if self.stream.tell() > f_tmp.tell() or self.stream.tell() + total_padding < f_tmp.tell():
f_tmp.seek(0)
f_debug = open("debug.cgf", "wb")
f_debug.write(f_tmp.read(-1))
f_debug.close()
raise Exception('write check failed: file sizes differ by more than padding')
finally:
f_tmp.close()
self.toaster.msgblockend()
# spell is finished: prevent recursing into the tree
return False
class SpellCheckTangentSpace(CgfSpell):
"""This spell checks the tangent space calculation.
Only useful for debugging.
"""
SPELLNAME = "check_tangentspace"
SENSITIVITY = 0.1 # admissible float error (relative to one)
def datainspect(self):
return self.inspectblocktype(CgfFormat.MeshChunk)
def branchinspect(self, branch):
return isinstance(branch, (CgfFormat.MeshChunk, CgfFormat.NodeChunk))
def branchentry(self, branch):
if not isinstance(branch, CgfFormat.MeshChunk):
# keep recursing
return True
# get tangents and normals
if not (branch.normals_data and branch.tangents_data):
return True
oldtangents = [tangent for tangent in branch.tangents_data.tangents]
self.toaster.msg("recalculating new tangent space")
branch.update_tangent_space()
newtangents = [tangent for tangent in branch.tangents_data.tangents]
self.toaster.msgblockbegin("validating and checking old with new")
for norm, oldtangent, newtangent in zip(branch.normals_data.normals,
oldtangents, newtangents):
#self.toaster.msg("*** %s ***" % (norm,))
# check old
norm = (norm.x, norm.y, norm.z)
tan = tuple(x / 32767.0
for x in (oldtangent[0].x,
oldtangent[0].y,
oldtangent[0].z))
bin = tuple(x / 32767.0
for x in (oldtangent[1].x,
oldtangent[1].y,
oldtangent[1].z))
if abs(vecNorm(norm) - 1) > self.SENSITIVITY:
self.toaster.logger.warn("normal has non-unit norm")
if abs(vecNorm(tan) - 1) > self.SENSITIVITY:
self.toaster.logger.warn("oldtangent has non-unit norm")
if abs(vecNorm(bin) - 1) > self.SENSITIVITY:
self.toaster.logger.warn("oldbinormal has non-unit norm")
if (oldtangent[0].w != oldtangent[1].w):
raise ValueError(
"inconsistent oldtangent w coordinate (%i != %i)"
% (oldtangent[0].w, oldtangent[1].w))
if not (oldtangent[0].w in (-32767, 32767)):
raise ValueError(
"invalid oldtangent w coordinate (%i)" % oldtangent[0].w)
if oldtangent[0].w > 0:
cross = vecCrossProduct(tan, bin)
else:
cross = vecCrossProduct(bin, tan)
crossnorm = vecNorm(cross)
if abs(crossnorm - 1) > self.SENSITIVITY:
# a lot of these...
self.toaster.logger.warn("tan and bin not orthogonal")
self.toaster.logger.warn("%s %s" % (tan, bin))
self.toaster.logger.warn("(error is %f)"
% abs(crossnorm - 1))
cross = vecscalarMul(cross, 1.0/crossnorm)
if vecDistance(norm, cross) > self.SENSITIVITY:
self.toaster.logger.warn(
"norm not cross product of tangent and binormal")
#self.toaster.logger.warn("norm = %s" % (norm,))
#self.toaster.logger.warn("tan = %s" % (tan,))
#self.toaster.logger.warn("bin = %s" % (bin,))
#self.toaster.logger.warn("tan bin cross prod = %s" % (cross,))
self.toaster.logger.warn(
"(error is %f)" % vecDistance(norm, cross))
# compare old with new
if sum((abs(oldtangent[0].x - newtangent[0].x),
abs(oldtangent[0].y - newtangent[0].y),
abs(oldtangent[0].z - newtangent[0].z),
abs(oldtangent[0].w - newtangent[0].w),
abs(oldtangent[1].x - newtangent[1].x),
abs(oldtangent[1].y - newtangent[1].y),
abs(oldtangent[1].z - newtangent[1].z),
abs(oldtangent[1].w - newtangent[1].w))) > self.SENSITIVITY * 32767.0:
ntan = tuple(x / 32767.0 for x in (newtangent[0].x, newtangent[0].y, newtangent[0].z))
nbin = tuple(x / 32767.0 for x in (newtangent[1].x, newtangent[1].y, newtangent[1].z))
self.toaster.logger.warn("old and new tangents differ substantially")
self.toaster.logger.warn("old tangent")
self.toaster.logger.warn("%s %s" % (tan, bin))
self.toaster.logger.warn("new tangent")
self.toaster.logger.warn("%s %s" % (ntan, nbin))
self.toaster.msgblockend()
class SpellCheckHasVertexColors(CgfSpell):
"""This spell checks if a model has vertex colors.
Only useful for debugging.
"""
# example: farcry/FCData/Objects/Buildings/M03/compound_area/coa_instantshelter_door_cloth.cgf
SPELLNAME = "check_vcols"
def datainspect(self):
return self.inspectblocktype(CgfFormat.MeshChunk)
def branchinspect(self, branch):
return isinstance(branch, (CgfFormat.MeshChunk, CgfFormat.NodeChunk))
def branchentry(self, branch):
if isinstance(branch, CgfFormat.MeshChunk):
if branch.has_vertex_colors:
self.toaster.msg("has vertex colors!")
else:
# keep recursing
return True
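# Hedged usage note (assumption): spells in this module are normally driven by
# PyFFI's cgf toaster front end rather than imported directly, along the lines of
#
#     cgftoaster check_tangentspace /path/to/model.cgf
#     cgftoaster check_readwrite /path/to/model.cgf
#
# The exact entry-point name depends on how the PyFFI toaster scripts are installed.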
| griest024/PokyrimTools | pyffi-develop/pyffi/spells/cgf/check.py | Python | mit | 9,081 | 0.002313 |
import atexit
import os
import tempfile
import time
import threading
import unittest
import nose.tools
import smqtk.utils.file_utils
__author__ = "paul.tunison@kitware.com"
class TestFileModificationMonitor (unittest.TestCase):
def _mk_test_fp(self):
fd, fp = tempfile.mkstemp()
os.close(fd)
atexit.register(lambda: os.remove(fp))
return fp
def test_monitor_stop(self):
# test that monitor stops when its told to
fp = self._mk_test_fp()
has_triggered = [False]
def cb(filepath):
has_triggered[0] = True
nose.tools.assert_equal(filepath, fp)
interval = 0.01
monitor = smqtk.utils.file_utils.FileModificationMonitor(fp, interval,
0.5, cb)
nose.tools.assert_true(monitor.stopped())
monitor.start()
try:
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_true(monitor.is_alive())
nose.tools.assert_false(monitor.stopped())
monitor.stop()
# If thread hasn't entered while loop yet, it will immediately kick
# out, otherwise its sleeping for the given interval.
monitor.join(interval*2)
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_false(monitor.is_alive())
finally:
if monitor.is_alive():
print "WARNING :: Forcing thread stop by removing filepath var"
monitor.filepath = None
def test_short_file_copy(self):
# where "short" means effectively instantaneous file creation / copy
# / touch.
#
# procedure:
# - create a file via mkstemp
# - create file monitor with detection callback and non-zero settle
# time.
# - touch file
# - check that callback was NOT triggered immediately
# - wait settle time / 2, check that cb NOT triggered yet
# - wait settle time / 4, check that cb NOT triggered yet
# - wait settle time / 4, check that cb HAS been called.
fp = self._mk_test_fp()
has_triggered = [False]
def cb(filepath):
has_triggered[0] = True
nose.tools.assert_equal(filepath, fp)
interval = 0.01
settle = 0.1
monitor = smqtk.utils.file_utils.FileModificationMonitor(fp, interval,
settle, cb)
try:
monitor.start()
# file not touched, should still be waiting
nose.tools.assert_equal(monitor.state, monitor.STATE_WAITING)
nose.tools.assert_false(has_triggered[0])
time.sleep(interval)
smqtk.utils.file_utils.touch(fp)
time.sleep(interval*2)
monitor._log.info('checking')
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_equal(monitor.state, monitor.STATE_WATCHING)
time.sleep(settle / 2.)
monitor._log.info('checking')
nose.tools.assert_equal(monitor.state, monitor.STATE_WATCHING)
nose.tools.assert_false(has_triggered[0])
time.sleep(settle / 4.)
monitor._log.info('checking')
nose.tools.assert_equal(monitor.state, monitor.STATE_WATCHING)
nose.tools.assert_false(has_triggered[0])
time.sleep(settle / 4.)
monitor._log.info('checking')
nose.tools.assert_true(has_triggered[0])
finally:
monitor.stop()
monitor.join()
def test_long_file_wait(self):
# procedure:
# - create a file via mkstemp
# - create file monitor with detection callback and non-zero settle
# time.
# - setup/start thread that appends to file at an interval that is
# less than settle time
# - wait and check that cb hasn't been called a few times
# - stop appender thread
# - check that cb called after settle period
fp = self._mk_test_fp()
has_triggered = [False]
append_interval = 0.02
monitor_interval = 0.01
monitor_settle = 0.1
def cb(filepath):
has_triggered[0] = True
nose.tools.assert_equal(filepath, fp)
class AppendThread (threading.Thread):
def __init__(self):
super(AppendThread, self).__init__()
self._s = threading.Event()
def stop(self):
self._s.set()
def stopped(self):
return self._s.is_set()
def run(self):
while not self.stopped():
with open(fp, 'a') as f:
f.write('0')
time.sleep(append_interval)
m_thread = smqtk.utils.file_utils.FileModificationMonitor(fp,
monitor_interval,
monitor_settle,
cb)
a_thread = AppendThread()
try:
m_thread.start()
a_thread.start()
time.sleep(monitor_settle)
m_thread._log.info('checking')
nose.tools.assert_false(m_thread.stopped())
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_equal(m_thread.state, m_thread.STATE_WATCHING)
time.sleep(monitor_settle)
m_thread._log.info('checking')
nose.tools.assert_false(m_thread.stopped())
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_equal(m_thread.state, m_thread.STATE_WATCHING)
a_thread.stop()
time.sleep(monitor_settle)
m_thread._log.info('checking')
nose.tools.assert_true(has_triggered[0])
finally:
a_thread.stop()
m_thread.stop()
def test_invalid_params(self):
fp = self._mk_test_fp()
# Invalid path value
nose.tools.assert_raises(
ValueError,
smqtk.utils.file_utils.FileModificationMonitor,
'/not/real', 1, 1, lambda p: None
)
# Invalid timers values
nose.tools.assert_raises(
ValueError,
smqtk.utils.file_utils.FileModificationMonitor,
fp, -1, 1, lambda p: None
)
nose.tools.assert_raises(
ValueError,
smqtk.utils.file_utils.FileModificationMonitor,
fp, 1, -1, lambda p: None
)
| Purg/SMQTK | python/smqtk/tests/utils/file_utils/test_FileModificationMonitor.py | Python | bsd-3-clause | 6,758 | 0.000444 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
def _console_dump(client, server_id):
try:
resp, output = client.get_console_output(server_id, None)
LOG.debug("Console Output for Server %s:\n%s" % (
server_id, output))
except exceptions.NotFound:
LOG.debug("Server %s: doesn't have a console" % server_id)
pass
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
"""Waits for a server to reach a given status."""
def _get_task_state(body):
if client.service == CONF.compute.catalog_v3_type:
task_state = body.get("os-extended-status:task_state", None)
else:
task_state = body.get('OS-EXT-STS:task_state', None)
return task_state
# NOTE(afazekas): UNKNOWN status possible on ERROR
# or in a very early stage.
resp, body = client.get_server(server_id)
old_status = server_status = body['status']
old_task_state = task_state = _get_task_state(body)
start_time = int(time.time())
timeout = client.build_timeout + extra_timeout
while True:
        # NOTE(afazekas): Now the BUILD status is only reached
        # during the UNKNOWN->ACTIVE transition.
# TODO(afazekas): enumerate and validate the stable status set
if status == 'BUILD' and server_status != 'UNKNOWN':
return
if server_status == status:
if ready_wait:
if status == 'BUILD':
return
# NOTE(afazekas): The instance is in "ready for action state"
# when no task in progress
                # NOTE(afazekas): Converted to string because of the XML
                # responses
if str(task_state) == "None":
                    # without the state API extension, 3 seconds is usually enough
time.sleep(CONF.compute.ready_wait)
return
else:
return
time.sleep(client.build_interval)
resp, body = client.get_server(server_id)
server_status = body['status']
task_state = _get_task_state(body)
if (server_status != old_status) or (task_state != old_task_state):
LOG.info('State transition "%s" ==> "%s" after %d second wait',
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
if (server_status == 'ERROR') and raise_on_error:
_console_dump(client, server_id)
raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
if timed_out:
expected_task_state = 'None' if ready_wait else 'n/a'
message = ('Server %(server_id)s failed to reach %(status)s '
'status and task state "%(expected_task_state)s" '
'within the required time (%(timeout)s s).' %
{'server_id': server_id,
'status': status,
'expected_task_state': expected_task_state,
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
_console_dump(client, server_id)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
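# Minimal usage sketch for wait_for_server_status(); `servers_client` and
# `server_id` below are assumptions standing in for a tempest servers client
# and an already-created server:
#
#   wait_for_server_status(servers_client, server_id, 'ACTIVE')
#   # allow extra build time and tolerate the ERROR state while polling:
#   wait_for_server_status(servers_client, server_id, 'SHUTOFF',
#                          extra_timeout=60, raise_on_error=False)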
def wait_for_image_status(client, image_id, status):
"""Waits for an image to reach a given status.
The client should have a get_image(image_id) method to get the image.
The client should also have build_interval and build_timeout attributes.
"""
resp, image = client.get_image(image_id)
start = int(time.time())
while image['status'] != status:
time.sleep(client.build_interval)
resp, image = client.get_image(image_id)
if image['status'] == 'ERROR':
raise exceptions.AddImageException(image_id=image_id)
# check the status again to avoid a false negative where we hit
# the timeout at the same time that the image reached the expected
# status
if image['status'] == status:
return
if int(time.time()) - start >= client.build_timeout:
message = ('Image %(image_id)s failed to reach %(status)s '
'status within the required time (%(timeout)s s).' %
{'image_id': image_id,
'status': status,
'timeout': client.build_timeout})
message += ' Current status: %s.' % image['status']
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
| vedujoshi/os_tempest | tempest/common/waiters.py | Python | apache-2.0 | 5,919 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-25 01:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Dia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Estado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Itinerario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=50)),
('texto_general', models.CharField(max_length=1000)),
('foto_general', models.ImageField(default='sitio/imagenes/none/no-img.png', upload_to='sitio/imagenes/')),
('fecha', models.DateTimeField()),
('fecha_salida', models.DateField()),
('fecha_llegada', models.DateField()),
('estado', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sitio.Estado')),
],
),
migrations.CreateModel(
name='Pais',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='itinerario',
name='pais_destino',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sitio.Pais'),
),
migrations.AddField(
model_name='itinerario',
name='usuario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='dia',
name='itinerario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sitio.Itinerario'),
),
]
| giocastagno/I.W._Delpuppo_Kopech_Castagno | turismo/sitio/migrations/0001_initial.py | Python | mit | 2,730 | 0.003297 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from erpnext.controllers.selling_controller import SellingController
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_delivery_note_serial_no
from frappe import _
from frappe.contacts.doctype.address.address import get_company_address
from frappe.desk.notifications import clear_doctype_notifications
from frappe.model.mapper import get_mapped_doc
from frappe.model.utils import get_fetch_values
from frappe.utils import cint, flt
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class DeliveryNote(SellingController):
def __init__(self, *args, **kwargs):
super(DeliveryNote, self).__init__(*args, **kwargs)
self.status_updater = [{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_delivered',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_order',
'status_field': 'delivery_status',
'keyword': 'Delivered',
'second_source_dt': 'Sales Invoice Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
'second_source_extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Invoice Item',
'join_field': 'si_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Invoice',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_invoice',
'overflow_type': 'delivery',
'no_tolerance': 1
},
{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Sales Order',
'source_field': '-1 * qty',
'extra_cond': """ and exists (select name from `tabDelivery Note` where name=`tabDelivery Note Item`.parent and is_return=1)"""
}]
def before_print(self):
def toggle_print_hide(meta, fieldname):
df = meta.get_field(fieldname)
if self.get("print_without_amount"):
df.set("__print_hide", 1)
else:
df.delete_key("__print_hide")
item_meta = frappe.get_meta("Delivery Note Item")
print_hide_fields = {
"parent": ["grand_total", "rounded_total", "in_words", "currency", "total", "taxes"],
"items": ["rate", "amount", "discount_amount", "price_list_rate", "discount_percentage"]
}
for key, fieldname in print_hide_fields.items():
for f in fieldname:
toggle_print_hide(self.meta if key == "parent" else item_meta, f)
super(DeliveryNote, self).before_print()
def set_actual_qty(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
actual_qty = frappe.db.sql("""select actual_qty from `tabBin`
where item_code = %s and warehouse = %s""", (d.item_code, d.warehouse))
d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0
def so_required(self):
"""check in manage account if sales order required or not"""
if frappe.db.get_value("Selling Settings", None, 'so_required') == 'Yes':
for d in self.get('items'):
if not d.against_sales_order:
frappe.throw(_("Sales Order required for Item {0}").format(d.item_code))
def validate(self):
self.validate_posting_time()
super(DeliveryNote, self).validate()
self.set_status()
self.so_required()
self.validate_proj_cust()
self.check_close_sales_order("against_sales_order")
self.validate_for_items()
self.validate_warehouse()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.validate_with_previous_doc()
if self._action != 'submit' and not self.is_return:
set_batch_nos(self, 'warehouse', True)
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.update_current_stock()
if not self.installation_status: self.installation_status = 'Not Installed'
def validate_with_previous_doc(self):
super(DeliveryNote, self).validate_with_previous_doc({
"Sales Order": {
"ref_dn_field": "against_sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Order Item": {
"ref_dn_field": "so_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Sales Invoice": {
"ref_dn_field": "against_sales_invoice",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Invoice Item": {
"ref_dn_field": "si_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) \
and not self.is_return:
self.validate_rate_with_reference_doc([["Sales Order", "against_sales_order", "so_detail"],
["Sales Invoice", "against_sales_invoice", "si_detail"]])
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or
ifnull(customer,'')='')""", (self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_for_items(self):
check_list, chk_dupl_itm = [], []
if cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
return
for d in self.get('items'):
e = [d.item_code, d.description, d.warehouse, d.against_sales_order or d.against_sales_invoice, d.batch_no or '']
f = [d.item_code, d.description, d.against_sales_order or d.against_sales_invoice]
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1:
if e in check_list:
frappe.msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
frappe.msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
else:
chk_dupl_itm.append(f)
def validate_warehouse(self):
super(DeliveryNote, self).validate_warehouse()
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == 1:
if not d['warehouse']:
frappe.throw(_("Warehouse required for stock Item {0}").format(d["item_code"]))
def update_current_stock(self):
if self.get("_action") and self._action != "update_after_submit":
for d in self.get('items'):
d.actual_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, "actual_qty")
for d in self.get('packed_items'):
bin_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, ["actual_qty", "projected_qty"], as_dict=True)
if bin_qty:
d.actual_qty = flt(bin_qty.actual_qty)
d.projected_qty = flt(bin_qty.projected_qty)
def on_submit(self):
self.validate_packed_qty()
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
# update delivered qty in sales order
self.update_prevdoc_status()
self.update_billing_status()
if not self.is_return:
self.check_credit_limit()
elif self.issue_credit_note:
self.make_return_invoice()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
self.update_stock_ledger()
self.make_gl_entries()
def on_cancel(self):
self.check_close_sales_order("against_sales_order")
self.check_next_docstatus()
self.update_prevdoc_status()
self.update_billing_status()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
self.update_stock_ledger()
self.cancel_packing_slips()
self.make_gl_entries_on_cancel()
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
extra_amount = 0
validate_against_credit_limit = False
bypass_credit_limit_check_at_sales_order = cint(frappe.db.get_value("Customer", self.customer,
"bypass_credit_limit_check_at_sales_order"))
if bypass_credit_limit_check_at_sales_order:
validate_against_credit_limit = True
extra_amount = self.base_grand_total
else:
for d in self.get("items"):
if not (d.against_sales_order or d.against_sales_invoice):
validate_against_credit_limit = True
break
if validate_against_credit_limit:
check_credit_limit(self.customer, self.company,
bypass_credit_limit_check_at_sales_order, extra_amount)
def validate_packed_qty(self):
"""
Validate that if packed qty exists, it should be equal to qty
"""
if not any([flt(d.get('packed_qty')) for d in self.get("items")]):
return
has_error = False
for d in self.get("items"):
if flt(d.get('qty')) != flt(d.get('packed_qty')):
frappe.msgprint(_("Packed quantity must equal quantity for Item {0} in row {1}").format(d.item_code, d.idx))
has_error = True
if has_error:
raise frappe.ValidationError
def check_next_docstatus(self):
submit_rv = frappe.db.sql("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.delivery_note = %s and t1.docstatus = 1""",
(self.name))
if submit_rv:
frappe.throw(_("Sales Invoice {0} has already been submitted").format(submit_rv[0][0]))
submit_in = frappe.db.sql("""select t1.name
from `tabInstallation Note` t1, `tabInstallation Note Item` t2
where t1.name = t2.parent and t2.prevdoc_docname = %s and t1.docstatus = 1""",
(self.name))
if submit_in:
frappe.throw(_("Installation Note {0} has already been submitted").format(submit_in[0][0]))
def cancel_packing_slips(self):
"""
Cancel submitted packing slips related to this delivery note
"""
res = frappe.db.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %s
AND docstatus = 1""", self.name)
if res:
for r in res:
ps = frappe.get_doc('Packing Slip', r[0])
ps.cancel()
frappe.msgprint(_("Packing Slip(s) cancelled"))
def update_status(self, status):
self.set_status(update=True, status=status)
self.notify_update()
clear_doctype_notifications(self)
def update_billing_status(self, update_modified=True):
updated_delivery_notes = [self.name]
for d in self.get("items"):
if d.si_detail and not d.so_detail:
d.db_set('billed_amt', d.amount, update_modified=update_modified)
elif d.so_detail:
updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified)
for dn in set(updated_delivery_notes):
dn_doc = self if (dn == self.name) else frappe.get_doc("Delivery Note", dn)
dn_doc.update_billing_percentage(update_modified=update_modified)
self.load_from_db()
def make_return_invoice(self):
try:
return_invoice = make_sales_invoice(self.name)
return_invoice.is_return = True
return_invoice.save()
return_invoice.submit()
frappe.msgprint(_("Credit Note {0} has been created automatically").format(return_invoice.name))
except:
frappe.throw(_("Could not create Credit Note automatically, please uncheck 'Issue Credit Note' and submit again"))
def update_billed_amount_based_on_so(so_detail, update_modified=True):
# Billed against Sales Order directly
billed_against_so = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where so_detail=%s and (dn_detail is null or dn_detail = '') and docstatus=1""", so_detail)
billed_against_so = billed_against_so and billed_against_so[0][0] or 0
# Get all Delivery Note Item rows against the Sales Order Item row
dn_details = frappe.db.sql("""select dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent
from `tabDelivery Note Item` dn_item, `tabDelivery Note` dn
where dn.name=dn_item.parent and dn_item.so_detail=%s
and dn.docstatus=1 and dn.is_return = 0
order by dn.posting_date asc, dn.posting_time asc, dn.name asc""", so_detail, as_dict=1)
updated_dn = []
for dnd in dn_details:
billed_amt_agianst_dn = 0
# If delivered against Sales Invoice
if dnd.si_detail:
billed_amt_agianst_dn = flt(dnd.amount)
billed_against_so -= billed_amt_agianst_dn
else:
# Get billed amount directly against Delivery Note
billed_amt_agianst_dn = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where dn_detail=%s and docstatus=1""", dnd.name)
billed_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0
# Distribute billed amount directly against SO between DNs based on FIFO
if billed_against_so and billed_amt_agianst_dn < dnd.amount:
pending_to_bill = flt(dnd.amount) - billed_amt_agianst_dn
if pending_to_bill <= billed_against_so:
billed_amt_agianst_dn += pending_to_bill
billed_against_so -= pending_to_bill
else:
billed_amt_agianst_dn += billed_against_so
billed_against_so = 0
frappe.db.set_value("Delivery Note Item", dnd.name, "billed_amt", billed_amt_agianst_dn, update_modified=update_modified)
updated_dn.append(dnd.parent)
return updated_dn
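# Illustrative walk-through of the FIFO distribution above (all numbers are
# hypothetical): suppose one Sales Order Item row is delivered through two
# Delivery Notes, DN-1 (amount 60, older) and DN-2 (amount 40), and 70 has
# been billed directly against the Sales Order. DN-1 absorbs 60 of that
# billed amount (billed_amt = 60, leaving 10), and DN-2 receives the
# remaining 10 (billed_amt = 10); a later invoice raised directly against
# DN-2 for the outstanding 30 would bring its billed_amt up to 40.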
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Shipments'),
})
return list_context
def get_invoiced_qty_map(delivery_note):
"""returns a map: {dn_detail: invoiced_qty}"""
invoiced_qty_map = {}
for dn_detail, qty in frappe.db.sql("""select dn_detail, qty from `tabSales Invoice Item`
where delivery_note=%s and docstatus=1""", delivery_note):
if not invoiced_qty_map.get(dn_detail):
invoiced_qty_map[dn_detail] = 0
invoiced_qty_map[dn_detail] += qty
return invoiced_qty_map
def get_returned_qty_map_against_so(sales_orders):
"""returns a map: {so_detail: returned_qty}"""
returned_qty_map = {}
for name, returned_qty in frappe.get_all('Sales Order Item', fields = ["name", "returned_qty"],
filters = {'parent': ('in', sales_orders), 'docstatus': 1}, as_list=1):
if not returned_qty_map.get(name):
returned_qty_map[name] = 0
returned_qty_map[name] += returned_qty
return returned_qty_map
def get_returned_qty_map_against_dn(delivery_note):
"""returns a map: {so_detail: returned_qty}"""
returned_qty_map = frappe._dict(frappe.db.sql("""select dn_item.item_code, sum(abs(dn_item.qty)) as qty
from `tabDelivery Note Item` dn_item, `tabDelivery Note` dn
where dn.name = dn_item.parent
and dn.docstatus = 1
and dn.is_return = 1
and dn.return_against = %s
group by dn_item.item_code
""", delivery_note))
return returned_qty_map
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
doc = frappe.get_doc('Delivery Note', source_name)
sales_orders = [d.against_sales_order for d in doc.items]
returned_qty_map_against_so = get_returned_qty_map_against_so(sales_orders)
returned_qty_map_against_dn = get_returned_qty_map_against_dn(source_name)
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("set_po_nos")
if len(target.get("items")) == 0:
frappe.throw(_("All these items have already been invoiced"))
target.run_method("calculate_taxes_and_totals")
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Sales Invoice", 'company_address', target.company_address))
def update_item(source_doc, target_doc, source_parent):
target_doc.qty, returned_qty = get_pending_qty(source_doc)
if not source_doc.so_detail:
returned_qty_map_against_dn[source_doc.item_code] = returned_qty
if source_doc.serial_no and source_parent.per_billed > 0:
target_doc.serial_no = get_delivery_note_serial_no(source_doc.item_code,
target_doc.qty, source_parent.name)
def get_pending_qty(item_row):
pending_qty = item_row.qty - invoiced_qty_map.get(item_row.name, 0) - returned_qty_map_against_so.get(item_row.so_detail, 0)
returned_qty = flt(returned_qty_map_against_dn.get(item_row.item_code, 0))
if not item_row.so_detail:
if returned_qty >= pending_qty:
pending_qty = 0
returned_qty -= pending_qty
else:
pending_qty -= returned_qty
returned_qty = 0
return pending_qty, returned_qty
doc = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "dn_detail",
"parent": "delivery_note",
"so_detail": "so_detail",
"against_sales_order": "sales_order",
"serial_no": "serial_no",
"cost_center": "cost_center"
},
"postprocess": update_item,
"filter": lambda d: get_pending_qty(d)[0]<=0
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doc
@frappe.whitelist()
def make_delivery_trip(source_name, target_doc=None):
def update_stop_details(source_doc, target_doc, source_parent):
target_doc.customer = source_parent.customer
target_doc.address = source_parent.shipping_address_name
target_doc.customer_address = source_parent.shipping_address
target_doc.contact = source_parent.contact_person
target_doc.customer_contact = source_parent.contact_display
target_doc.grand_total = source_parent.grand_total
# Append unique Delivery Notes in Delivery Trip
delivery_notes.append(target_doc.delivery_note)
delivery_notes = []
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Delivery Trip",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Delivery Stop",
"field_map": {
"parent": "delivery_note"
},
"condition": lambda item: item.parent not in delivery_notes,
"postprocess": update_stop_details
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_installation_note(source_name, target_doc=None):
def update_item(obj, target, source_parent):
target.qty = flt(obj.qty) - flt(obj.installed_qty)
target.serial_no = obj.serial_no
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Installation Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Installation Note Item",
"field_map": {
"name": "prevdoc_detail_docname",
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
},
"postprocess": update_item,
"condition": lambda doc: doc.installed_qty < doc.qty
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_packing_slip(source_name, target_doc=None):
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Packing Slip",
"field_map": {
"name": "delivery_note",
"letter_head": "letter_head"
},
"validation": {
"docstatus": ["=", 0]
}
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Delivery Note", source_name, target_doc)
@frappe.whitelist()
def update_delivery_note_status(docname, status):
dn = frappe.get_doc("Delivery Note", docname)
dn.update_status(status)
| ESS-LLP/erpnext-healthcare | erpnext/stock/doctype/delivery_note/delivery_note.py | Python | gpl-3.0 | 20,437 | 0.026423 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import subprocess
import sys
import tempfile
import time
import psutil
import ray
class RayTestTimeoutException(Exception):
"""Exception used to identify timeouts from test utilities."""
pass
def _pid_alive(pid):
"""Check if the process with this PID is alive or not.
Args:
pid: The pid to check.
Returns:
This returns false if the process is dead. Otherwise, it returns true.
"""
try:
os.kill(pid, 0)
return True
except OSError:
return False
def wait_for_pid_to_exit(pid, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if not _pid_alive(pid):
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for process to exit.")
def wait_for_children_of_pid(pid, num_children=1, timeout=20):
p = psutil.Process(pid)
start_time = time.time()
while time.time() - start_time < timeout:
num_alive = len(p.children(recursive=False))
if num_alive >= num_children:
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for process children to start "
"({}/{} started).".format(num_alive, num_children))
def wait_for_children_of_pid_to_exit(pid, timeout=20):
children = psutil.Process(pid).children()
if len(children) == 0:
return
_, alive = psutil.wait_procs(children, timeout=timeout)
if len(alive) > 0:
raise RayTestTimeoutException(
"Timed out while waiting for process children to exit."
" Children still alive: {}.".format([p.name() for p in alive]))
def kill_process_by_name(name, SIGKILL=False):
for p in psutil.process_iter(attrs=["name"]):
if p.info["name"] == name:
if SIGKILL:
p.kill()
else:
p.terminate()
def run_string_as_driver(driver_script):
"""Run a driver as a separate process.
Args:
driver_script: A string to run as a Python script.
Returns:
The script's output.
"""
# Save the driver script as a file so we can call it using subprocess.
with tempfile.NamedTemporaryFile() as f:
f.write(driver_script.encode("ascii"))
f.flush()
out = ray.utils.decode(
subprocess.check_output(
[sys.executable, f.name], stderr=subprocess.STDOUT))
return out
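def _example_run_string_as_driver():
    # Minimal usage sketch: run a trivial driver script in a separate Python
    # process and return its captured stdout. The script body here is an
    # illustrative assumption; real callers usually pass a script that
    # imports and uses ray.
    driver_script = "print('hello from driver')\n"
    return run_string_as_driver(driver_script)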
def run_string_as_driver_nonblocking(driver_script):
"""Start a driver as a separate process and return immediately.
Args:
driver_script: A string to run as a Python script.
Returns:
A handle to the driver process.
"""
# Save the driver script as a file so we can call it using subprocess. We
# do not delete this file because if we do then it may get removed before
# the Python process tries to run it.
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(driver_script.encode("ascii"))
f.flush()
return subprocess.Popen(
[sys.executable, f.name], stdout=subprocess.PIPE)
def flat_errors():
errors = []
for job_errors in ray.errors(all_jobs=True).values():
errors.extend(job_errors)
return errors
def relevant_errors(error_type):
return [error for error in flat_errors() if error["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if len(relevant_errors(error_type)) >= num_errors:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out waiting for {} {} errors.".format(
num_errors, error_type))
def wait_for_condition(condition_predictor,
timeout_ms=1000,
retry_interval_ms=100):
"""A helper function that waits until a condition is met.
Args:
        condition_predictor: A function that returns True once the
            condition is met.
timeout_ms: Maximum timeout in milliseconds.
retry_interval_ms: Retry interval in milliseconds.
Return:
Whether the condition is met within the timeout.
"""
time_elapsed = 0
while time_elapsed <= timeout_ms:
if condition_predictor():
return True
time_elapsed += retry_interval_ms
time.sleep(retry_interval_ms / 1000.0)
return False
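def _example_wait_for_condition():
    # Minimal usage sketch: poll a predicate until it returns True or the
    # timeout elapses. The state dict and the timings below are illustrative
    # assumptions.
    state = {"done": False}
    def _finish():
        state["done"] = True
    _finish()
    # Returns True because the predicate is already satisfied on first check.
    return wait_for_condition(lambda: state["done"],
                              timeout_ms=500, retry_interval_ms=50)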
def recursive_fnmatch(dirpath, pattern):
"""Looks at a file directory subtree for a filename pattern.
Similar to glob.glob(..., recursive=True) but also supports 2.7
"""
matches = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
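def _example_recursive_fnmatch(base_dir):
    # Minimal usage sketch: collect every JSON file underneath base_dir
    # (base_dir and the pattern are illustrative assumptions).
    return recursive_fnmatch(base_dir, "*.json")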
| ujvl/ray-ng | python/ray/tests/utils.py | Python | apache-2.0 | 4,953 | 0 |
import pickle
commits = {}
def main():
output = ""
for commit in range(0,3):
output += "c*\n"
for file in range(0,2):
smells = str(commit+file*2)
output += "class"+str(file)+" smells="+smells+"\n"
result = open("mockpmdresult.txt","w")
result.write(output)
result.close()
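# For illustration, the generated mockpmdresult.txt begins:
#   c*
#   class0 smells=0
#   class1 smells=2
#   c*
#   class0 smells=1
#   class1 smells=3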
if __name__ == "__main__":
main() | UBC-Victorious-410/project | tools/mock_pmd_parser.py | Python | mit | 330 | 0.060606 |
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Bill Mill <bill.mill@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 davidbrai <davidbrai@gmail.com> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PaginatedListBase:
def __init__(self):
self.__elements = list()
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __iter__(self):
for element in self.__elements:
yield element
while self._couldGrow():
newElements = self._grow()
for element in newElements:
yield element
def _isBiggerThan(self, index):
return len(self.__elements) > index or self._couldGrow()
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self._couldGrow():
self._grow()
def _grow(self):
newElements = self._fetchNextPage()
self.__elements += newElements
return newElements
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list[index]
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
class PaginatedList(PaginatedListBase):
"""
This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
You can simply enumerate through instances of this class::
for repo in user.get_repos():
print repo.name
You can also index them or take slices::
second_repo = user.get_repos()[1]
first_repos = user.get_repos()[:10]
If you want to iterate in reversed order, just do::
for repo in user.get_repos().reversed:
print repo.name
    And if you really need it, you can explicitly access a specific page::
some_repos = user.get_repos().get_page(0)
some_other_repos = user.get_repos().get_page(3)
"""
def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None):
PaginatedListBase.__init__(self)
self.__requester = requester
self.__contentClass = contentClass
self.__firstUrl = firstUrl
self.__firstParams = firstParams or ()
self.__nextUrl = firstUrl
self.__nextParams = firstParams or {}
self.__headers = headers
if self.__requester.per_page != 30:
self.__nextParams["per_page"] = self.__requester.per_page
self._reversed = False
self.__totalCount = None
@property
def totalCount(self):
if not self.__totalCount:
self._grow()
return self.__totalCount
def _getLastPageUrl(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=self.__nextParams,
headers=self.__headers
)
links = self.__parseLinkHeader(headers)
lastUrl = links.get("last")
return lastUrl
@property
def reversed(self):
r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
r.__reverse()
return r
def __reverse(self):
self._reversed = True
lastUrl = self._getLastPageUrl()
if lastUrl:
self.__nextUrl = lastUrl
def _couldGrow(self):
return self.__nextUrl is not None
def _fetchNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__nextUrl,
parameters=self.__nextParams,
headers=self.__headers
)
data = data if data else []
self.__nextUrl = None
if len(data) > 0:
links = self.__parseLinkHeader(headers)
if self._reversed:
if "prev" in links:
self.__nextUrl = links["prev"]
elif "next" in links:
self.__nextUrl = links["next"]
self.__nextParams = None
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
content = [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data if element is not None
]
if self._reversed:
return content[::-1]
return content
def __parseLinkHeader(self, headers):
links = {}
if "link" in headers:
linkHeaders = headers["link"].split(", ")
for linkHeader in linkHeaders:
(url, rel) = linkHeader.split("; ")
url = url[1:-1]
rel = rel[5:-1]
links[rel] = url
return links
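    # Illustrative note: a GitHub pagination "Link" header such as (value is
    # a hypothetical example)
    #   <https://api.github.com/user/repos?page=3>; rel="next", <https://api.github.com/user/repos?page=50>; rel="last"
    # is parsed by __parseLinkHeader into
    #   {"next": "https://api.github.com/user/repos?page=3",
    #    "last": "https://api.github.com/user/repos?page=50"}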
def get_page(self, page):
params = dict(self.__firstParams)
if page != 0:
params["page"] = page + 1
if self.__requester.per_page != 30:
params["per_page"] = self.__requester.per_page
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=params,
headers=self.__headers
)
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
return [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data
]
| cytec/SickRage | lib/github/PaginatedList.py | Python | gpl-3.0 | 7,862 | 0.003689 |
#!/usr/bin/env python
import sys
import os
import glob
import json
import re
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
def init_report_dir(metadata_dir, report_name):
report_dir = metadata_dir + '/reports/' + report_name
if not os.path.exists(report_dir):
os.makedirs(report_dir)
return report_dir
def generate_report(metadata_dir, report_name):
count_types = [
"live_aligned_embl-dkfz_variant_not_called_donors",
"live_embl-dkfz_variant_called_donors", # don't switch order
]
report_dir = init_report_dir(metadata_dir, report_name)
[dates, metadata_dirs] = get_metadata_dirs(metadata_dir, '2015-05-07')
data = [["Date", "To be called", "Called"]]
counts = []
today_donors = []
for ctype in count_types:
donor_counts = []
for md in metadata_dirs:
donors = set()
file_name_pattern = md + '/reports/gnos_repo_summary/' + ctype + '.*.txt'
files = glob.glob(file_name_pattern)
for f in files: donors.update(get_donors(f))
donor_counts.append(len(donors))
if len(donor_counts) == len(metadata_dirs):
today_donors.append(donors)
counts.append(donor_counts)
for i, d in enumerate(dates): data.append([d, counts[0][i], counts[1][i]])
with open(report_dir + '/summary_counts.json', 'w') as o: o.write(json.dumps(data))
compute_site_report_new(metadata_dir, report_dir, today_donors)
compute_site_report(metadata_dir, report_dir, today_donors)
def compute_site_report_new(metadata_dir, report_dir, today_donors):
compute_sites = {
"aws_ireland": set(),
"aws_oregon": set(),
"bsc": set(),
"dkfz": set(),
"ebi": set(),
"etri": set(),
"oicr": set(),
"pdc1_1": set(),
"pdc2_0": set(),
"tokyo": set(),
"ucsc": set(),
"sanger": set(),
"idash": set(),
"dkfz_hpc": set()
}
get_whitelists(compute_sites)
site_assigned_donors = set()
site_summary = {}
unassigned_uncalled_donors = set()
for c in compute_sites:
for d in compute_sites:
if c == d: continue
if compute_sites.get(c).intersection(compute_sites.get(d)):
# log overlap donors issue
print "WARN: overlap donors found between " + c + " and " + d \
+ ": " + ", ".join(compute_sites.get(c).intersection(compute_sites.get(d)))
if not site_summary.get(c):
site_summary[c] = {
'Called': len(compute_sites.get(c).intersection(today_donors[1])),
'To_be_called': len(compute_sites.get(c).intersection(today_donors[0])),
'Total': len(compute_sites.get(c))
}
site_assigned_donors.update(compute_sites.get(c))
# report WARN if the sum of Called and To_be_called not equal Total in site_summary
if not site_summary[c]['Called'] + site_summary[c]['To_be_called'] == site_summary[c]['Total']:
print "WARN: donors: " + ", ".join(compute_sites.get(c).difference(today_donors[0]).difference(today_donors[1])) + \
" found in whitelist are not ready to do embl-dkfz variant calling!!!"
site_summary['Unassigned'] = {
'Called': len(today_donors[1].difference(site_assigned_donors)),
'To_be_called': len(today_donors[0].difference(site_assigned_donors)),
'Total': len(today_donors[0].union(today_donors[1]).difference(site_assigned_donors))
}
unassigned_uncalled_donors = today_donors[0].difference(site_assigned_donors)
# today's counts
with open(report_dir + '/summary_compute_site_counts.json', 'w') as o: o.write(json.dumps(site_summary))
with open(report_dir + '/unassigned_uncalled_donors.txt', 'w') as o:
o.write('# Unassigned and uncalled donors\n')
o.write('# dcc_project_code' + '\t' + 'submitter_donor_id' + '\n')
o.write('\n'.join(unassigned_uncalled_donors) + '\n')
"""
# get all previous days counts
[dates, metadata_dirs] = get_metadata_dirs(metadata_dir, '2015-05-07')
site_summary_report = []
for i, md in reversed(list(enumerate(metadata_dirs))):
summary_site_count_file = md + '/reports/embl-dkfz_summary_counts/summary_compute_site_counts.json'
if not os.path.isfile(summary_site_count_file): continue
site_counts = json.load(open(summary_site_count_file))
site_summary_report.append([dates[i], site_counts])
with open(report_dir + '/hist_summary_compute_site_counts.json', 'w') as o: o.write(json.dumps(site_summary_report))
"""
def compute_site_report(metadata_dir, report_dir, today_donors):
compute_sites = {
"aws_ireland": set(),
"aws_oregon": set(),
"bsc": set(),
"dkfz": set(),
"ebi": set(),
"etri": set(),
"oicr": set(),
"pdc1_1": set(),
"pdc2_0": set(),
"tokyo": set(),
"ucsc": set(),
"sanger": set(),
"idash": set(),
"dkfz_hpc": set()
}
get_whitelists(compute_sites)
completed_donors = {}
site_assigned_donors = set()
for c in compute_sites:
for d in compute_sites:
if c == d: continue
if compute_sites.get(c).intersection(compute_sites.get(d)):
# log overlap donors issue
print "WARN: overlap donors found between " + c + " and " + d \
+ ": " + ", ".join(compute_sites.get(c).intersection(compute_sites.get(d)))
completed_donors[c] = compute_sites.get(c).intersection(today_donors[1])
site_assigned_donors.update(completed_donors[c])
site_not_assigned_donors = today_donors[1].difference(site_assigned_donors)
#print completed_donors
#print site_not_assigned_donors
site_summary = {}
for c in completed_donors: site_summary[c] = len(completed_donors.get(c))
# today's counts
with open(report_dir + '/summary_site_counts.json', 'w') as o: o.write(json.dumps(site_summary))
# get all previous days counts
[dates, metadata_dirs] = get_metadata_dirs(metadata_dir, '2015-05-07')
site_summary_report = []
for i, md in reversed(list(enumerate(metadata_dirs))):
summary_site_count_file = md + '/reports/embl-dkfz_summary_counts/summary_site_counts.json'
if not os.path.isfile(summary_site_count_file): continue
site_counts = json.load(open(summary_site_count_file))
site_summary_report.append([dates[i], site_counts])
with open(report_dir + '/hist_summary_site_counts.json', 'w') as o: o.write(json.dumps(site_summary_report))
def get_whitelists(compute_sites):
whitelist_dir = '../pcawg-operations/variant_calling/dkfz_embl_workflow/whitelists/'
for c in compute_sites:
files = glob.glob(whitelist_dir + '/' + c + '/' + c + '.*.txt')
for f in files: compute_sites.get(c).update(get_donors(f))
def get_donors(fname):
donors = []
with open(fname) as f:
for d in f:
donors.append(d.rstrip())
return donors
def get_metadata_dirs(metadata_dir, start_date='2015-01-11'):
dirs = sorted(glob.glob(metadata_dir + '/../20*_???'))
dir_name = os.path.basename(metadata_dir)
ret_dirs = []
ret_dates = []
start = False
for d in dirs:
if '../' + start_date in d: start = True
if not start: continue
ret_dates.append( str.split(os.path.basename(d),'_')[0] )
ret_dirs.append(d)
if dir_name == os.path.basename(d): break
return [ret_dates, ret_dirs]
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(description="PCAWG Report Generator Gathering Counts",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=True)
args = parser.parse_args()
metadata_dir = args.metadata_dir # this dir contains gnos manifest files, will also host all reports
    if not os.path.isdir(metadata_dir): # TODO: add more directory-name checks to make sure it's right
sys.exit('Error: specified metadata directory does not exist!')
report_name = re.sub(r'^pc_report-', '', os.path.basename(__file__))
report_name = re.sub(r'\.py$', '', report_name)
generate_report(metadata_dir, report_name)
return 0
if __name__ == "__main__":
sys.exit(main())
| ICGC-TCGA-PanCancer/pancancer-sandbox | pcawg_metadata_parser/pc_report-embl-dkfz_summary_counts.py | Python | gpl-2.0 | 8,651 | 0.007282 |
from django.template.defaultfilters import phone2numeric_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import render, setup
class Phone2numericTests(SimpleTestCase):
@setup({'phone2numeric01': '{{ a|phone2numeric }} {{ b|phone2numeric }}'})
def test_phone2numeric01(self):
output = render(
'phone2numeric01',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric02':
'{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}'})
def test_phone2numeric02(self):
output = render(
'phone2numeric02',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric03': '{{ a|phone2numeric }}'})
def test_phone2numeric03(self):
output = render(
'phone2numeric03',
{'a': 'How razorback-jumping frogs can level six piqued gymnasts!'},
)
self.assertEqual(
output,
'469 729672225-5867464 37647 226 53835 749 747833 49662787!'
)
class FunctionTests(SimpleTestCase):
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
| webostin/django-btc | tests/template_tests/filter_tests/test_phone2numeric.py | Python | bsd-3-clause | 1,450 | 0.002069 |
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""
import string
import warnings
import numpy as np
from .utils import check_random_state
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies the array in place
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
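# Illustrative example of normalize() (values are hypothetical): row-wise
# normalization of a 2x2 array,
#
#   >>> A = np.array([[1.0, 3.0], [2.0, 2.0]])
#   >>> normalize(A, axis=1)
#   array([[ 0.25,  0.75],
#          [ 0.5 ,  0.5 ]])
#
# up to the small EPS added in place to guard against division by zero.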
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, obs):
"""Compute the log probability under the model and compute posteriors
Implements rank and beam pruning in the forward-backward
algorithm to speed up inference in large models.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence `obs`
posteriors: array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the `obs`
See Also
--------
eval : Compute the log probability under the model and posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
_, posteriors = self.eval(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.eval(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in xrange(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string ''. Likewise, if you
would like just to do an initialization, call this method with
n_iter=0.
Parameters
----------
obs : list
List of array-like observation sequences (shape (n_i, n_features)).
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data, or
decreasing `covars_prior`.
**Please note that setting parameters in the `fit` method is
deprecated and will be removed in the next release.
Set it on initialization instead.**
"""
if kwargs:
warnings.warn("Setting parameters in the 'fit' method is"
"deprecated and will be removed in 0.14. Set it on "
"initialization instead.", DeprecationWarning,
stacklevel=2)
        # initialisations in case the user still passes parameters to fit,
        # so things don't break
for name in ('n_iter', 'thresh', 'params', 'init_params'):
if name in kwargs:
setattr(self, name, kwargs[name])
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in xrange(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
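    # Minimal end-to-end EM sketch on hypothetical toy data (two well-separated
    # Gaussian clusters, so two hidden states are recoverable):
    #
    # >>> import numpy as np
    # >>> X = np.concatenate([np.random.randn(100, 2),
    # ...                     np.random.randn(100, 2) + 5])
    # >>> model = GaussianHMM(n_components=2, n_iter=20)
    # >>> model = model.fit([X])        # obs is a *list* of sequences
    # >>> model.transmat_.shape         # (2, 2)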
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats["trans"] += np.exp(logsumexp(lneta, 0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
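    # In formula form, the update above is (sketch; the expectations come from
    # the accumulated sufficient statistics):
    #
    #   startprob[i]  propto  max(startprob_prior - 1 + E[# starts in state i], 1e-20)
    #   transmat[i,j] propto  max(transmat_prior  - 1 + E[# transitions i->j],  1e-20)
    #
    # followed by normalization (row-wise for the transition matrix).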
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in xrange(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / cv_den
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in xrange(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.
    emissionprob : array, shape (`n_components`, `n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
    def _check_input_symbols(self, obs):
        """Check whether the input can be used for MultinomialHMM.fit: it must
        be an array of non-negative integers whose values form a contiguous
        range, e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK while y = [0, 0, 3, 5, 10]
        is not.
        """
symbols = np.asanyarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input is discontinuous
return False
return True
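    # Doctest-style illustration of the checks above (the instance is
    # hypothetical; any MultinomialHMM works):
    #
    # >>> h = MultinomialHMM(n_components=2)
    # >>> h._check_input_symbols([[0, 0, 2, 1, 3, 1, 1]])
    # True
    # >>> h._check_input_symbols([[0, 0, 3, 5, 10]])  # gaps in the symbol range
    # False
    # >>> h._check_input_symbols([[0, -1, 2]])        # negative symbol
    # False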
def fit(self, obs, **kwargs):
        err_msg = ("Input must be an array of non-negative integers whose "
                   "values form a contiguous range, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
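    # Usage sketch with a hypothetical discrete sequence over three symbols:
    #
    # >>> h = MultinomialHMM(n_components=2, n_iter=20)
    # >>> h = h.fit([[0, 1, 2, 2, 1, 0, 0, 1]])
    # >>> h.emissionprob_.shape    # (2, 3) -- n_components x n_symbols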
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can \
contain any combination of 's' for startprob, 't' for transmat, 'm' \
for means, and 'c' for covars, etc. Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat,'m' for
means, and 'c' for covars, etc. Defaults to all parameters.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix that is incompatible with the scikit's
        # BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in xrange(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.eval(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
| mrshu/scikit-learn | sklearn/hmm.py | Python | bsd-3-clause | 46,104 | 0.000022 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# sample spider
class DmozItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
link = scrapy.Field()
desc = scrapy.Field()
class JDItem(scrapy.Item):
category = scrapy.Field()
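# Usage sketch (hypothetical values), typically inside a spider's parse()
# callback; scrapy items behave like dicts:
#
#   item = DmozItem(title="Example", link="http://example.org", desc="...")
#   item["title"]   # field access
#   yield item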
| Jetpie/web-scraper | commercial_web_navigation/commercial_web_navigation/items.py | Python | mit | 429 | 0.011655 |
from django.conf.urls import url
from api import views
urlpatterns = [
url(r'stations/$', views.get_stations, name='api_stations'),
url(r'entry/(?P<station_id>\d+)/$', views.make_entry, name='api_entry'),
url(r'new/$', views.add_station, name='api_add_station'),
# Booking api
url(r'booking/(?P<resident_id>\d+)/$', views.booking, name='api_booking'),
url(r'book_profile/$', views.book_profile, name='api_book_profile'),
url(r'book_phone/$', views.book_phone, name='api_book_phone'),
url(r'book_code/$', views.book_code, name='api_book_code'),
# Insure api
url(r'insure/$', views.insure, name='api_insure'),
# Drugshare api
url(r'register_pharm/$', views.register_pharm, name='api_register_pharm'),
url(r'make_token/(?P<device_id>\d+)/$',
views.make_token, name='api_make_token'),
url(r'add_device/$', views.add_device, name='api_add_device'),
url(r'get_profile/$', views.get_profile, name='api_get_profile'),
url(r'update_pharm/(?P<device_id>\d+)/$',
views.update_pharm, name='api_update_pharm'),
url(r'add_outlet/(?P<device_id>\d+)/$',
views.add_outlet, name='api_add_outlet'),
url(r'delete_outlet/(?P<id>\d+)/$',
views.delete_outlet, name='api_delete_outlet'),
url(r'add_drug/$', views.add_drug, name='api_add_drug'),
url(r'edit_drug/(?P<id>\d+)/$', views.edit_drug, name='api_edit_drug'),
url(r'search_drug/(?P<device_id>\d+)/$',
views.search_drug, name='api_search_drug'),
url(r'wish_drug/(?P<device_id>\d+)/$',
views.wishlist_drug, name='api_wishlist_drug'),
url(r'stock_drug/(?P<device_id>\d+)/$',
views.stock_drug, name='api_stock_drug'),
url(r'remove_drug/(?P<id>\d+)/$',
views.remove_drug, name='api_remove_drug'),
url(r'recent_drugs/(?P<count>\d+)/$',
views.recent_drugs, name='api_recent_drugs'),
url(r'request_drug/(?P<drug_id>\d+)/$',
views.request_drug, name='api_request_drug'),
url(r'pending/(?P<device_id>\d+)/$',
views.pending_requests, name='api_pending_requests'),
url(r'accept/(?P<request_id>\d+)/$', views.accept, name='api_accept'),
url(r'reject/(?P<request_id>\d+)/$', views.reject, name='api_reject'),
url(r'drug_list/$', views.list_generic_drugs, name='api_drugs_list'),
url(r'feedback/(?P<id>\d+)/$', views.feedback, name='api_feedback'),
]
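# Usage sketch: the named patterns above can be reversed elsewhere in the
# project (resulting paths depend on how this URLconf is included):
#
#   from django.core.urlresolvers import reverse
#   reverse('api_stations')                        # e.g. '/stations/'
#   reverse('api_entry', kwargs={'station_id': 1}) # e.g. '/entry/1/'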
| boyombo/django-stations | stations/api/urls.py | Python | mit | 2,383 | 0 |
#!/usr/bin/env python
"""
pitchanalysis.py
--
Christopher Kuech
cjkuech@gmail.com
--
Requires:
Python 2.7
Instructions:
python pitchanalysis.py [wav-file-name]
"""
import matplotlib
from math import log
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy as np
import pyaudio
import sys
from time import time, sleep
import Tkinter as tk
import wavelab
(WIDTH, HEIGHT) = (800, 500)
FNAME = './Bach.wav' if len(sys.argv) != 2 else sys.argv[1]
font = ('Helvetica', 14, 'bold')
CHUNK = 1024
def audioworker():
"""the function run on the audio thread"""
global frame
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(2),
channels=1, rate=4*44100, output=True)
# unknown why rate is off by 4x
while True:
stream.write(data[frame:frame+CHUNK].tostring())
frame = (frame + CHUNK) % len(wav)
stream.stop_stream()
stream.close()
p.terminate()
def graphicsworker():
"""the function run on the graphics thread"""
while True:
start = time()
p = ptype.get()
w = wsize.get()
wty = wtype.get()
# compute frequencies from clip
clip = data[frame:frame+w]
if wty == "hanning":
clip *= np.hanning(w)
elif wty == "hamming":
clip *= np.hamming(w)
freqs = wavelab.frequencies(clip)
# update plot
xs = np.sort(freqs.keys())
ys = np.array(map(freqs.get, xs))
axes.cla()
(xmax, ymin, ymax) = (10e4, 0.000001, 10e2)
# (xlim, ylim) = (_, (ymin, ymax)) = ((0, 1e4), (1e-3, 1e7))
axes.set_xscale("log")
axes.set_yscale("linear")
axes.set_xlim((1, xmax))
if p == "square":
# axes.set_yscale("linear")
axes.set_ylim((ymin**2, ymax**2))
ys = ys * ys
elif p == "dB":
# axes.set_yscale("log")
axes.set_ylim((log(ymin), log(ymax)))
ys = np.log(ys)
elif p == "-dB":
# axes.set_yscale("log")
axes.set_ylim((-log(ymax), -log(ymin)))
ys = -np.log(ys)
elif p == "linear":
# axes.set_yscale("linear")
axes.set_ylim((ymin, ymax))
axes.plot(xs, ys, 'r-')
canvas.show()
# pitch tracker
freq = max(freqs, key=lambda f: freqs[f])
pitch.set(wavelab.pitch(freq).replace('/','\n'))
# attempt to achieve 30fps animation (at best)
dt = time() - start
sleep(max(0, 1.0/30.0 - dt))
# read wave file
(framerate, wav) = wavelab.readwav(FNAME)
data = np.concatenate((wav, wav)) # avoid out of bounds
frame = 0
# create a GUI instance (do before any use of Tkinter)
root = tk.Tk()
root.wm_title("Frequency Spectrogram")
# these objects hold the variables from the widgets
wsize = tk.IntVar() # window size (in frames)
wsize.set(2205)
wtype = tk.StringVar() # type of windowing to use
wtype.set("rectangle")
ptype = tk.StringVar() # type of power to use
ptype.set("square")
pitch = tk.StringVar() # the current pitch
pitch.set("")
widgetps = lambda n, v: {'variable': v, 'text': n, 'value': n}
# returns the dict of kwargs that initialize a widget
# create the canvas widget and add it to the GUI
# canvas = tk.Canvas(root, borderwidth=0, width=WIDTH, height=HEIGHT, bg='#000')
# canvas.grid(row=0, column=0, columnspan=4)
# canvas.show()
canvasframe = tk.Frame(root, width=WIDTH, height=HEIGHT)
canvasframe.grid(row=0, column=0, columnspan=4)
figure = Figure()
axes = figure.add_axes( (0.1, 0.1, 0.8, 0.8), frameon=True,
xlabel="Frequency (Hz)", ylabel="Power")
canvas = FigureCanvasTkAgg(figure, canvasframe)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
canvas.show()
# create the wtype controller and add it to the GUI
tk.Label(root, font=font, text="Windowing").grid(row=1, column=0, pady=10)
wframe = tk.Frame(root)
wframe.grid(row=2, column=0, pady=10, sticky="n")
tk.Radiobutton(wframe, **widgetps("rectangle", wtype)).grid(sticky="w", row=0)
tk.Radiobutton(wframe, **widgetps("hamming" , wtype)).grid(sticky="w", row=1)
tk.Radiobutton(wframe, **widgetps("hanning" , wtype)).grid(sticky="w", row=2)
# create the wsize controller and add it to the GUI
tk.Label(root, font=font, text="Window Size").grid(row=1, column=1, pady=10)
tk.Scale(root, variable=wsize, orient=tk.HORIZONTAL, from_=10, to=4410).grid(row=2, column=1, sticky="wen")
# create the ptype controller and add it to the GUI
tk.Label(root, font=font, text="Power").grid(row=1, column=2, pady=10)
pframe = tk.Frame(root)
pframe.grid(row=2, column=2, pady=10, sticky="n")
tk.Radiobutton(pframe, **widgetps("square", ptype)).grid(sticky="w", row=0)
tk.Radiobutton(pframe, **widgetps("dB", ptype)).grid(sticky="w", row=1)
tk.Radiobutton(pframe, **widgetps("-dB", ptype)).grid(sticky="w", row=2)
tk.Radiobutton(pframe, **widgetps("linear", ptype)).grid(sticky="w", row=3)
# create the area where the pitchlabel is displayed
tk.Label(root, font=font, text="Pitch").grid(row=1, column=3, pady=10)
(fontfamily, fontsize, fontweight) = font
pitchfont = (fontfamily, 24, fontweight)
pitchlabel = tk.Label(root, font=pitchfont, textvariable=pitch, width=7).grid(row=2, column=3)
# start the other threads
wavelab.thread(audioworker)
wavelab.thread(graphicsworker)
# start the main update loop for the GUI (and block)
tk.mainloop()
| chriskuech/wavelab | pitchanalysis.py | Python | mit | 5,174 | 0.018748 |
# -*- coding: UTF-8 -*-
import os
from behave_common_steps import dummy, App
from dogtail.config import config
from time import sleep, localtime, strftime
import problem
import shutil
def before_all(context):
    """Set up nautilus stuff.
    Executed before all features.
    """
try:
# Cleanup abrt crashes
[x.delete() for x in problem.list()]
# Do the cleanup
os.system("python cleanup.py > /dev/null")
# Skip dogtail actions to print to stdout
config.logDebugToStdOut = False
config.typingDelay = 0.2
# Include assertion object
context.assertion = dummy()
# Kill initial setup
os.system("killall /usr/libexec/gnome-initial-setup")
# Store scenario start time for session logs
context.log_start_time = strftime("%Y-%m-%d %H:%M:%S", localtime())
context.app = App('nautilus', forceKill=False)
except Exception as e:
print("Error in before_all: %s" % e.message)
def after_step(context, step):
    """Teardown after each step.
    Here we take a screenshot and embed it (if one of the formatters supports it).
    """
try:
if problem.list():
problems = problem.list()
for crash in problems:
if hasattr(context, "embed"):
context.embed('text/plain', "abrt has detected a crash: %s" % crash.reason)
else:
print("abrt has detected a crash: %s" % crash.reason)
# Crash was stored, so it is safe to remove it now
[x.delete() for x in problems]
# Make screnshot if step has failed
if hasattr(context, "embed"):
os.system("gnome-screenshot -f /tmp/screenshot.jpg")
context.embed('image/jpg', open("/tmp/screenshot.jpg", 'r').read())
# Test debugging - set DEBUG_ON_FAILURE to drop to ipdb on step failure
if os.environ.get('DEBUG_ON_FAILURE'):
import ipdb; ipdb.set_trace() # flake8: noqa
except Exception as e:
print("Error in after_step: %s" % e.message)
def after_scenario(context, scenario):
"""Teardown for each scenario
Kill nautilus (in order to make this reliable we send sigkill)
"""
try:
# Stop nautilus
os.system("killall nautilus &> /dev/null")
# Attach journalctl logs
if hasattr(context, "embed"):
os.system("sudo journalctl /usr/bin/gnome-session --no-pager -o cat --since='%s'> /tmp/journal-session.log" % context.log_start_time)
data = open("/tmp/journal-session.log", 'r').read()
if data:
context.embed('text/plain', data)
if hasattr(context, 'temp_dir'):
shutil.rmtree(context.temp_dir)
# Make some pause after scenario
sleep(1)
except Exception as e:
# Stupid behave simply crashes in case exception has occurred
print("Error in after_scenario: %s" % e.message)
| fedora-desktop-tests/nautilus | features/environment.py | Python | gpl-2.0 | 3,002 | 0.001332 |
#!/usr/bin/env python
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
from tests import mongoutils
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
},
MONGO_DATABASES={
'default': {
'name': 'dumb',
},
},
INSTALLED_APPS=(
'tests',
),
MIDDLEWARE_CLASSES=(),
ROOT_URLCONF=None,
SECRET_KEY='foobar',
TEST_RUNNER='tests.mongoutils.TestRunner'
)
def runtests():
mongoutils.mongo_connect()
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
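# Usage sketch: invoke this script directly; a reachable MongoDB instance is
# assumed, since mongo_connect() runs before the Django test command
# (the test label below is hypothetical):
#
#   python runtests.py                 # run the whole 'tests' app
#   python runtests.py tests.SomeTest  # extra args go to `manage.py test`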
| qwiglydee/drf-mongo-filters | runtests.py | Python | gpl-2.0 | 751 | 0.001332 |
from lcs.agents import PerceptionString
from lcs.representations import UBR
class TestPerceptionString:
def test_should_initialize_with_defaults(self):
assert len(PerceptionString("foo")) == 3
assert len(PerceptionString(['b', 'a', 'r'])) == 3
def test_should_create_empty_with_defaults(self):
# when
ps = PerceptionString.empty(3)
# then
assert len(ps) == 3
assert repr(ps) == '###'
def test_should_create_empty_for_ubr(self):
# given
length = 3
wildcard = UBR(0, 16)
# when
ps = PerceptionString.empty(length, wildcard, oktypes=(UBR,))
# then
assert len(ps) == 3
assert ps[0] == ps[1] == ps[2] == wildcard
assert ps[0] is not ps[1] is not ps[2]
def test_should_safely_modify_single_attribute(self):
# given
length = 3
wildcard = UBR(0, 16)
ps = PerceptionString.empty(length, wildcard, oktypes=(UBR, ))
# when
ps[0].x1 = 2
# then (check if objects are not stored using references)
assert ps[1].x1 == 0
| khozzy/pyalcs | tests/lcs/agents/test_PerceptionString.py | Python | gpl-3.0 | 1,123 | 0 |
import json
import os
from textwrap import dedent
import boto3
import moto
import pytest
from moto.ec2 import ec2_backend
from moto.ec2 import utils as ec2_utils
from ecs_deplojo.connection import Connection
from ecs_deplojo.task_definitions import TaskDefinition
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.yield_fixture(scope="function")
def cluster():
with moto.mock_ecs(), moto.mock_ec2():
boto3.setup_default_session(region_name="eu-west-1")
ec2 = boto3.resource("ec2", region_name="eu-west-1")
ecs = boto3.client("ecs", region_name="eu-west-1")
known_amis = list(ec2_backend.describe_images())
test_instance = ec2.create_instances(
ImageId=known_amis[0].id, MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
cluster = ecs.create_cluster(clusterName="default")
ecs.register_container_instance(
cluster="default", instanceIdentityDocument=instance_id_document
)
yield cluster
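# Usage sketch (hypothetical test): any test that requests the `cluster`
# fixture (or `connection` below) talks to moto's in-memory ECS backend.
#
#   def test_default_cluster_exists(cluster):
#       ecs = boto3.client("ecs", region_name="eu-west-1")
#       arns = ecs.list_clusters()["clusterArns"]
#       assert any("default" in arn for arn in arns)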
@pytest.fixture
def connection(cluster):
return Connection()
@pytest.fixture
def definition():
path = os.path.join(BASE_DIR, "files/default_taskdef.json")
with open(path, "r") as json_file:
return TaskDefinition(json.load(json_file))
@pytest.fixture
def default_config():
path = os.path.join(BASE_DIR, "files/default_config.yml")
with open(path, "r") as fh:
yield fh
@pytest.fixture
def example_project(tmpdir):
data = """
{
"family": "default",
"volumes": [],
"containerDefinitions": [
{
"name": "web-1",
"image": "${image}",
"essential": true,
"command": ["hello", "world"],
"memory": 256,
"cpu": 0,
"portMappings": [
{
"containerPort": 8080,
"hostPort": 0
}
]
},
{
"name": "web-2",
"image": "${image}",
"essential": true,
"command": ["hello", "world"],
"memory": 256,
"cpu": 0,
"portMappings": [
{
"containerPort": 8080,
"hostPort": 0
}
]
}
]
}
""".strip()
filename = tmpdir.join("task_definition.json")
filename.write(data)
data = dedent(
"""
---
cluster_name: default
environment:
DATABASE_URL: postgresql://
environment_groups:
group-1:
ENV_CODE: 12345
task_definitions:
web:
template: %(template_filename)s
environment_group: group-1
task_role_arn: my-test
overrides:
web-1:
memory: 512
portMappings:
- hostPort: 0
containerPort: 8080
protocol: tcp
services:
web:
task_definition: web
before_deploy:
- task_definition: web
container: web-1
command: manage.py migrate --noinput
after_deploy:
- task_definition: web
container: web-1
command: manage.py clearsessions
"""
% {"template_filename": filename.strpath}
)
filename = tmpdir.join("config.yml")
filename.write(data)
return filename
| LabD/ecs-deplojo | tests/conftest.py | Python | mit | 3,378 | 0 |
''' -- imports from python libraries -- '''
# from datetime import datetime
import datetime
import json
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect # , HttpResponse uncomment when to use
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render_to_response # , render uncomment when to use
from django.template import RequestContext
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT, GSTUDIO_TASK_TYPES
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.ndf.models import Node, AttributeType, RelationType
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.templatetags.ndf_tags import edit_drawer_widget
from gnowsys_ndf.ndf.views.methods import get_node_common_fields, parse_template_data, get_execution_time, delete_node
from gnowsys_ndf.ndf.views.notify import set_notif_val
from gnowsys_ndf.ndf.views.methods import get_property_order_with_value
from gnowsys_ndf.ndf.views.methods import create_gattribute, create_grelation, create_task
GST_COURSE = node_collection.one({'_type': "GSystemType", 'name': GAPPS[7]})
app = GST_COURSE
# @login_required
@get_execution_time
def course(request, group_id, course_id=None):
"""
* Renders a list of all 'courses' available within the database.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
if course_id is None:
course_ins = node_collection.find_one({'_type': "GSystemType", "name": "Course"})
if course_ins:
course_id = str(course_ins._id)
if request.method == "POST":
# Course search view
title = GST_COURSE.name
search_field = request.POST['search_field']
course_coll = node_collection.find({'member_of': {'$all': [ObjectId(GST_COURSE._id)]},
'$or': [
{'$and': [
{'name': {'$regex': search_field, '$options': 'i'}},
{'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [{'access_policy': u"PRIVATE"}, {'created_by': request.user.id}]}
]
}
]
},
{'$and': [
{'tags': {'$regex': search_field, '$options': 'i'}},
{'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [{'access_policy': u"PRIVATE"}, {'created_by': request.user.id}]}
]
}
]
}
],
'group_set': {'$all': [ObjectId(group_id)]}
}).sort('last_update', -1)
# course_nodes_count = course_coll.count()
return render_to_response("ndf/course.html",
{'title': title,
'appId': app._id,
'searching': True, 'query': search_field,
'course_coll': course_coll, 'groupid': group_id, 'group_id':group_id
},
context_instance=RequestContext(request)
)
else:
# Course list view
title = GST_COURSE.name
course_coll = node_collection.find({'member_of': {'$all': [ObjectId(course_id)]},
'group_set': {'$all': [ObjectId(group_id)]},
'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [
{'access_policy': u"PRIVATE"},
{'created_by': request.user.id}
]
}
]
})
template = "ndf/course.html"
variable = RequestContext(request, {'title': title, 'course_nodes_count': course_coll.count(), 'course_coll': course_coll, 'groupid':group_id, 'appId':app._id, 'group_id':group_id})
return render_to_response(template, variable)
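# The search branch above boils down to the following MongoDB filter
# (sketch in plain pymongo terms; the ObjectIds are placeholders):
#
#   {
#       "member_of": {"$all": [course_type_id]},
#       "group_set": {"$all": [group_oid]},
#       "$or": [
#           {"name": {"$regex": query, "$options": "i"}},
#           {"tags": {"$regex": query, "$options": "i"}},
#       ],
#   }
#   # each branch additionally requires access_policy == "PUBLIC", or
#   # access_policy == "PRIVATE" together with created_by == request.user.id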
@login_required
@get_execution_time
def create_edit(request, group_id, node_id=None):
    """Creates/Modifies details about the given course.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
context_variables = {'title': GST_COURSE.name,
'group_id': group_id,
'groupid': group_id
}
if node_id:
course_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(node_id)})
else:
course_node = node_collection.collection.GSystem()
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(GST_COURSE._id),'group_set': ObjectId(group_id) })
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
if request.method == "POST":
# get_node_common_fields(request, course_node, group_id, GST_COURSE)
course_node.save(is_changed=get_node_common_fields(request, course_node, group_id, GST_COURSE))
return HttpResponseRedirect(reverse('course', kwargs={'group_id': group_id}))
else:
if node_id:
context_variables['node'] = course_node
context_variables['groupid'] = group_id
context_variables['group_id'] = group_id
context_variables['appId'] = app._id
context_variables['nodes_list'] = json.dumps(nodes_list)
return render_to_response("ndf/course_create_edit.html",
context_variables,
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def course_detail(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
course_structure_exists = False
title = GST_COURSE.name
course_node = node_collection.one({"_id": ObjectId(_id)})
if course_node.collection_set:
course_structure_exists = True
return render_to_response("ndf/course_detail.html",
{'node': course_node,
'groupid': group_id,
'group_id': group_id,
'appId': app._id,
'title':title,
'course_structure_exists': course_structure_exists
},
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def course_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Creates/Modifies document of given sub-types of Course(s).
"""
auth = None
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if auth:
group_id = str(auth._id)
else:
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
# app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
mis_admin = None
property_order_list = []
template = ""
template_prefix = "mis"
if request.user:
if auth is None:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
agency_type = auth.agency_type
agency_type_node = node_collection.one({
'_type': "GSystemType", 'name': agency_type
}, {
'collection_set': 1
})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(
node_collection.one({
"_id": eachset
}, {
'_id': 1, 'name': 1, 'type_of': 1
})
)
if app_set_id:
course_gst = node_collection.one({
'_type': "GSystemType", '_id': ObjectId(app_set_id)
}, {
'name': 1, 'type_of': 1
})
template = "ndf/" + course_gst.name.strip().lower().replace(' ', '_') \
+ "_create_edit.html"
title = course_gst.name
if app_set_instance_id:
course_gs = node_collection.one({
'_type': "GSystem", '_id': ObjectId(app_set_instance_id)
})
else:
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
property_order_list = get_property_order_with_value(course_gs)
if request.method == "POST":
# [A] Save course-node's base-field(s)
start_time = ""
if "start_time" in request.POST:
start_time = request.POST.get("start_time", "")
start_time = datetime.datetime.strptime(start_time, "%m/%Y")
end_time = ""
if "end_time" in request.POST:
end_time = request.POST.get("end_time", "")
end_time = datetime.datetime.strptime(end_time, "%m/%Y")
nussd_course_type = ""
if "nussd_course_type" in request.POST:
nussd_course_type = request.POST.get("nussd_course_type", "")
nussd_course_type = unicode(nussd_course_type)
unset_ac_options = []
if "unset-ac-options" in request.POST:
unset_ac_options = request.POST.getlist("unset-ac-options")
else:
# Just to execute loop at least once for Course Sub-Types
# other than 'Announced Course'
unset_ac_options = ["dummy"]
if course_gst.name == u"Announced Course":
announce_to_colg_list = request.POST.get(
"announce_to_colg_list", ""
)
announce_to_colg_list = [ObjectId(colg_id) for colg_id in announce_to_colg_list.split(",")]
colg_ids = []
# Parsing ObjectId -- from string format to ObjectId
for each in announce_to_colg_list:
if each and ObjectId.is_valid(each):
colg_ids.append(ObjectId(each))
# Fetching college(s)
colg_list_cur = node_collection.find({
'_id': {'$in': colg_ids}
}, {
'name': 1, 'attribute_set.enrollment_code': 1
})
if "_id" in course_gs:
# It means we are in editing mode of given Announced Course GSystem
unset_ac_options = [course_gs._id]
ac_nc_code_list = []
# Prepare a list
# 0th index (ac_node): Announced Course node,
# 1st index (nc_id): NUSSD Course node's ObjectId,
# 2nd index (nc_course_code): NUSSD Course's code
for cid in unset_ac_options:
ac_node = None
nc_id = None
nc_course_code = ""
# Here course_gst is Announced Course GSytemType's node
ac_node = node_collection.one({
'_id': ObjectId(cid), 'member_of': course_gst._id
})
# If ac_node found, means
# (1) we are dealing with creating Announced Course
# else,
# (2) we are in editing phase of Announced Course
course_node = None
if not ac_node:
# In this case, cid is of NUSSD Course GSystem
# So fetch that to extract course_code
# Set to nc_id
ac_node = None
course_node = node_collection.one({
'_id': ObjectId(cid)
})
else:
# In this case, fetch NUSSD Course from
# Announced Course GSystem's announced_for relationship
for rel in ac_node.relation_set:
if "announced_for" in rel:
course_node_ids = rel["announced_for"]
break
# Fetch NUSSD Course GSystem
if course_node_ids:
course_node = node_collection.find_one({
"_id": {"$in": course_node_ids}
})
# If course_code doesn't exists then
# set NUSSD Course GSystem's name as course_code
if course_node:
nc_id = course_node._id
for attr in course_node.attribute_set:
if "course_code" in attr:
nc_course_code = attr["course_code"]
break
if not nc_course_code:
nc_course_code = course_node.name.replace(" ", "-")
# Append to ac_nc_code_list
ac_nc_code_list.append([ac_node, nc_id, nc_course_code])
# For each selected college
# Create Announced Course GSystem
for college_node in colg_list_cur:
# Fetch Enrollment code from "enrollment_code" (Attribute)
college_enrollment_code = ""
if college_node:
for attr in college_node.attribute_set:
if attr and "enrollment_code" in attr:
college_enrollment_code = attr["enrollment_code"]
break
ann_course_id_list = []
# For each selected course to Announce
for ac_nc_code in ac_nc_code_list:
course_gs = ac_nc_code[0]
nc_id = ac_nc_code[1]
nc_course_code = ac_nc_code[2]
if not course_gs:
# Create new Announced Course GSystem
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + "_" + college_enrollment_code + "_"
+ start_time.strftime("%b_%Y") + "-"
+ end_time.strftime("%b_%Y")
)
request.POST["name"] = c_name
is_changed = get_node_common_fields(
request, course_gs, group_id, course_gst
)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({
'_id': field_set['_id']
})
field_instance_type = type(field_instance)
if (field_instance_type in
[AttributeType, RelationType]):
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
# Special case: AttributeTypes that require file instance as it's value in which case file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# Below 0th index is used because that function returns tuple(ObjectId, bool-value)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
if field_instance["name"] in ["start_time", "end_time"]:
# Course Duration
field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(course_gs._id, node_collection.collection.AttributeType(field_instance), field_value)
else:
# i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(nc_id)
# Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
# Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(course_gs._id, node_collection.collection.RelationType(field_instance), field_value)
ann_course_id_list.append(course_gs._id)
else:
is_changed = get_node_common_fields(request, course_gs, group_id, course_gst)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({'_id': field_set['_id']})
field_instance_type = type(field_instance)
if field_instance_type in [AttributeType, RelationType]:
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
# Special case: AttributeTypes that require file instance as it's value in which case file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# Below 0th index is used because that function returns tuple(ObjectId, bool-value)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
# if field_instance["name"] in ["start_time","end_time"]:
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
# elif field_instance["name"] in ["start_enroll", "end_enroll"]: #Student Enrollment DUration
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y")
if field_instance["name"] in ["mast_tr_qualifications", "voln_tr_qualifications"]:
# Needs sepcial kind of parsing
field_value = []
tr_qualifications = request.POST.get(field_instance["name"], '')
if tr_qualifications:
qualifications_dict = {}
tr_qualifications = [qual.strip() for qual in tr_qualifications.split(",")]
for i, qual in enumerate(tr_qualifications):
if (i % 2) == 0:
if qual == "true":
qualifications_dict["mandatory"] = True
elif qual == "false":
qualifications_dict["mandatory"] = False
else:
qualifications_dict["text"] = unicode(qual)
field_value.append(qualifications_dict)
qualifications_dict = {}
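                                            # Wire-format sketch (hypothetical values): the POSTed string
                                            # alternates a mandatory flag and a qualification text,
                                            # e.g. "true,M.Sc.,false,B.Ed." parses to
                                            #   [{"mandatory": True,  "text": u"M.Sc."},
                                            #    {"mandatory": False, "text": u"B.Ed."}]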
elif field_instance["name"] in ["max_marks", "min_marks"]:
# Needed because both these fields' values are dependent upon evaluation_type field's value
evaluation_type = request.POST.get("evaluation_type", "")
if evaluation_type == u"Continuous":
field_value = None
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(
course_gs._id,
node_collection.collection.AttributeType(field_instance),
field_value
)
else:
#i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(cid)
#Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
#Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(
course_gs._id,
node_collection.collection.RelationType(field_instance),
field_value
)
return HttpResponseRedirect(
reverse(
app_name.lower() + ":" + template_prefix + '_app_detail',
kwargs={
'group_id': group_id, "app_id": app_id,
"app_set_id": app_set_id
}
)
)
univ = node_collection.one({
'_type': "GSystemType", 'name': "University"
}, {
'_id': 1
})
university_cur = None
if not mis_admin:
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1, 'name': 1, 'group_admin': 1}
)
if univ and mis_admin:
university_cur = node_collection.find(
{'member_of': univ._id, 'group_set': mis_admin._id},
{'name': 1}
).sort('name', 1)
default_template = "ndf/course_create_edit.html"
context_variables = {
'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name,
'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'title': title,
'university_cur': university_cur,
'property_order_list': property_order_list
}
if app_set_instance_id:
course_gs.get_neighbourhood(course_gs.member_of)
context_variables['node'] = course_gs
if "Announced Course" in course_gs.member_of_names_list:
for attr in course_gs.attribute_set:
if attr:
for eachk, eachv in attr.items():
context_variables[eachk] = eachv
for rel in course_gs.relation_set:
if rel:
for eachk, eachv in rel.items():
if eachv:
get_node_name = node_collection.one({'_id': eachv[0]})
context_variables[eachk] = get_node_name.name
try:
return render_to_response(
[template, default_template],
context_variables, context_instance=RequestContext(request)
)
except TemplateDoesNotExist as tde:
        error_message = "\n CourseCreateEditViewError: This HTML template (" \
                        + str(tde) + ") does not exist !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseCreateEditViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
@login_required
@get_execution_time
def mis_course_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Detail view of NUSSD Course/ Announced Course
"""
# print "\n Found course_detail n gone inn this...\n\n"
auth = None
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
# app_name = "mis"
app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
nodes = None
node = None
property_order_list = []
property_order_list_ac = []
    is_link_needed = True  # Required to show the Link button on the interface that links a Student's/VoluntaryTeacher's node with its corresponding Author node
template_prefix = "mis"
context_variables = {}
#Course structure collection _dict
course_collection_dict = {}
course_collection_list = []
course_structure_exists = False
if request.user:
if auth is None:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
if auth:
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
if app_set_id:
course_gst = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
title = course_gst.name
template = "ndf/course_list.html"
if request.method == "POST":
search = request.POST.get("search", "")
classtype = request.POST.get("class", "")
# nodes = list(node_collection.find({'name':{'$regex':search, '$options': 'i'},'member_of': {'$all': [course_gst._id]}}))
nodes = node_collection.find({'member_of': course_gst._id, 'name': {'$regex': search, '$options': 'i'}})
else:
nodes = node_collection.find({'member_of': course_gst._id, 'group_set': ObjectId(group_id)})
if app_set_instance_id:
template = "ndf/course_details.html"
node = node_collection.one({'_type': "GSystem", '_id': ObjectId(app_set_instance_id)})
property_order_list = get_property_order_with_value(node)
node.get_neighbourhood(node.member_of)
if title == u"Announced Course":
property_order_list_ac = node.attribute_set
# Course structure as list of dicts
if node.collection_set:
course_structure_exists = True
context_variables = { 'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name, 'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'course_gst_name': course_gst.name,
'title': title,
'course_structure_exists': course_structure_exists,
'nodes': nodes, 'node': node,
'property_order_list': property_order_list,
'property_order_list_ac': property_order_list_ac,
'is_link_needed': is_link_needed
}
try:
# print "\n template-list: ", [template, default_template]
# template = "ndf/fgh.html"
# default_template = "ndf/dsfjhk.html"
# return render_to_response([template, default_template],
return render_to_response(template,
context_variables,
context_instance = RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseDetailListViewError: This html template (" + str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseDetailListViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
@login_required
@get_execution_time
def create_course_struct(request, group_id, node_id):
"""
This view is to create the structure of the Course.
A Course holds CourseSection, which further holds CourseSubSection
in their respective collection_set.
A tree depiction to this is as follows:
Course Name:
1. CourseSection1
1.1. CourseSubSection1
1.2. CourseSubSection2
2. CourseSection2
2.1. CourseSubSection3
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_set_id = None
property_order_list_cs = []
property_order_list_css = []
course_structure_exists = False
title = "Course Authoring"
course_node = node_collection.one({"_id": ObjectId(node_id)})
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_gs = node_collection.collection.GSystem()
cs_gs.member_of.append(cs_gst._id)
property_order_list_cs = get_property_order_with_value(cs_gs)
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_gs = node_collection.collection.GSystem()
css_gs.member_of.append(css_gst._id)
property_order_list_css = get_property_order_with_value(css_gs)
course_collection_list = course_node.collection_set
if course_collection_list:
course_structure_exists = True
# for attr in course_node.attribute_set:
# if attr.has_key("evaluation_type"):
# eval_type = attr["evaluation_type"]
    # If evaluation_type flag is True, it is Final. If False, it is Continuous.
# if(eval_type==u"Final"):
# eval_type_flag = True
# else:
# eval_type_flag = False
if request.method == "GET":
app_id = request.GET.get("app_id", "")
app_set_id = request.GET.get("app_set_id", "")
return render_to_response("ndf/create_course_structure.html",
{'cnode': course_node,
'groupid': group_id,
'group_id': group_id,
'title': title,
'app_id': app_id, 'app_set_id': app_set_id,
'property_order_list': property_order_list_cs,
'property_order_list_css': property_order_list_css
},
context_instance=RequestContext(request)
)
@login_required
def save_course_section(request, group_id):
'''
Accepts:
* NUSSD Course/Course node _id
* CourseSection name
Actions:
* Creates CourseSection GSystem with name received.
* Appends this new CourseSection node id into
NUSSD Course/Course collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
cs_node_name = request.POST.get("cs_name", '')
course_node_id = request.POST.get("course_node_id", '')
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_new = node_collection.collection.GSystem()
cs_new.member_of.append(cs_gst._id)
cs_new.name = cs_node_name
cs_new.modified_by = int(request.user.id)
cs_new.created_by = int(request.user.id)
cs_new.contributors.append(int(request.user.id))
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
cs_new.prior_node.append(ObjectId(course_node._id))
cs_new.save()
node_collection.collection.update({'_id': course_node._id}, {'$push': {'collection_set': cs_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["cs_new_id"] = str(cs_new._id)
return HttpResponse(json.dumps(response_dict))
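# Illustrative request/response contract (an assumption drawn from the
# docstrings, not a formal API spec): a successful AJAX POST to
# save_course_section() carrying "cs_name" and "course_node_id" returns a JSON
# body such as
#     {"success": true, "cs_new_id": "<ObjectId as string>"}
# while unsuccessful calls leave "success" set to false. The other AJAX views
# below follow the same pattern.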
@login_required
def save_course_sub_section(request, group_id):
'''
Accepts:
* CourseSection node _id
* CourseSubSection name
Actions:
* Creates CourseSubSection GSystem with name received.
* Appends this new CourseSubSection node id into
CourseSection collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSubSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_name = request.POST.get("css_name", '')
cs_node_id = request.POST.get("cs_node_id", '')
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_new = node_collection.collection.GSystem()
css_new.member_of.append(css_gst._id)
# set name
css_new.name = css_node_name
css_new.modified_by = int(request.user.id)
css_new.created_by = int(request.user.id)
css_new.contributors.append(int(request.user.id))
cs_node = node_collection.one({"_id": ObjectId(cs_node_id)})
css_new.prior_node.append(cs_node._id)
css_new.save()
node_collection.collection.update({'_id': cs_node._id}, {'$push': {'collection_set': css_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["css_new_id"] = str(css_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def change_node_name(request, group_id):
'''
Accepts:
* CourseSection/ CourseSubSection node _id
* New name for CourseSection node
Actions:
* Updates received node's name
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
new_name = request.POST.get("new_name", '')
node = node_collection.one({"_id": ObjectId(node_id)})
node.name = new_name.strip()
node.save()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def change_order(request, group_id):
'''
Accepts:
* 2 node ids.
Basically, either of CourseSection or CourseSubSection
* Parent node id
Either a NUSSD Course/Course or CourseSection
Actions:
* Swaps the 2 node ids in the collection set of received
parent node
'''
response_dict = {"success": False}
collection_set_list = []
if request.is_ajax() and request.method == "POST":
node_id_up = request.POST.get("node_id_up", '')
node_id_down = request.POST.get("node_id_down", '')
parent_node_id = request.POST.get("parent_node", '')
parent_node = node_collection.one({"_id": ObjectId(parent_node_id)})
collection_set_list = parent_node.collection_set
a, b = collection_set_list.index(ObjectId(node_id_up)), collection_set_list.index(ObjectId(node_id_down))
collection_set_list[b], collection_set_list[a] = collection_set_list[a], collection_set_list[b]
node_collection.collection.update({'_id': parent_node._id}, {'$set': {'collection_set': collection_set_list }}, upsert=False, multi=False)
parent_node.reload()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def course_sub_section_prop(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* Properties dict
Actions:
* Creates GAttributes with the values of received dict
for the respective CourseSubSection node
Returns:
* success (i.e True/False)
* If request.method is POST, all GAttributes in a dict structure,
'''
response_dict = {"success": False}
if request.is_ajax():
if request.method == "POST":
assessment_flag = False
css_node_id = request.POST.get("css_node_id", '')
prop_dict = request.POST.get("prop_dict", '')
assessment_chk = json.loads(request.POST.get("assessment_chk", ''))
prop_dict = json.loads(prop_dict)
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
at_cs_hours = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_minutes'})
at_cs_assessment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assessment'})
at_cs_assignment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assignment'})
at_cs_min_marks = node_collection.one({'_type': 'AttributeType', 'name': 'min_marks'})
at_cs_max_marks = node_collection.one({'_type': 'AttributeType', 'name': 'max_marks'})
if assessment_chk is True:
create_gattribute(css_node._id, at_cs_assessment, True)
assessment_flag = True
for propk, propv in prop_dict.items():
# add attributes to css gs
if(propk == "course_structure_minutes"):
create_gattribute(css_node._id, at_cs_hours, int(propv))
elif(propk == "course_structure_assignment"):
create_gattribute(css_node._id, at_cs_assignment, propv)
if assessment_flag:
if(propk == "min_marks"):
create_gattribute(css_node._id, at_cs_min_marks, int(propv))
if(propk == "max_marks"):
create_gattribute(css_node._id, at_cs_max_marks, int(propv))
css_node.reload()
response_dict["success"] = True
else:
css_node_id = request.GET.get("css_node_id", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
if css_node.attribute_set:
for each in css_node.attribute_set:
for k, v in each.items():
response_dict[k] = v
response_dict["success"] = True
else:
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
@login_required
def add_units(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* NUSSD Course/Course node _id
Actions:
* Redirects to course_units.html
'''
variable = None
unit_node = None
css_node_id = request.GET.get('css_node_id', '')
unit_node_id = request.GET.get('unit_node_id', '')
course_node_id = request.GET.get('course_node', '')
app_id = request.GET.get('app_id', '')
app_set_id = request.GET.get('app_set_id', '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
title = "Course Units"
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
variable = RequestContext(request, {
'group_id': group_id, 'groupid': group_id,
'css_node': css_node,
'title': title,
'app_set_id': app_set_id,
'app_id': app_id,
'unit_node': unit_node,
'course_node': course_node,
})
template = "ndf/course_units.html"
return render_to_response(template, variable)
@login_required
def get_resources(request, group_id):
'''
Accepts:
* Name of GSystemType (Page, File, etc.)
* CourseSubSection node _id
* widget_for
Actions:
* Fetches all GSystems of selected GSystemType as resources
Returns:
* Returns Drawer with resources
'''
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get('css_node_id', "")
unit_node_id = request.POST.get('unit_node_id', "")
widget_for = request.POST.get('widget_for', "")
resource_type = request.POST.get('resource_type', "")
resource_type = resource_type.strip()
list_resources = []
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
if resource_type:
if resource_type == "Pandora":
resource_type = "Pandora_video"
resource_gst = node_collection.one({'_type': "GSystemType", 'name': resource_type})
res = node_collection.find(
{
'member_of': resource_gst._id,
'group_set': ObjectId(group_id),
'status': u"PUBLISHED"
}
)
for each in res:
list_resources.append(each)
drawer_template_context = edit_drawer_widget("CourseUnits", group_id, unit_node, None, checked="collection_set", left_drawer_content=list_resources)
drawer_template_context["widget_for"] = widget_for
drawer_widget = render_to_string(
'ndf/drawer_widget.html',
drawer_template_context,
context_instance=RequestContext(request)
)
return HttpResponse(drawer_widget)
else:
error_message = "Resource Drawer: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "Resource Drawer: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@login_required
def save_resources(request, group_id):
'''
Accepts:
* List of resources (i.e GSystem of Page, File, etc.)
* CourseSubSection node _id
Actions:
* Sets the received resources in respective node's collection_set
'''
response_dict = {"success": False,"create_new_unit": True}
if request.is_ajax() and request.method == "POST":
list_of_res = json.loads(request.POST.get('list_of_res', ""))
css_node_id = request.POST.get('css_node', "")
unit_name = request.POST.get('unit_name', "")
unit_name = unit_name.strip()
unit_node_id = request.POST.get('unit_node_id', "")
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
list_of_res_ids = [ObjectId(each_res) for each_res in list_of_res]
try:
cu_new = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_new = None
if not cu_new:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_new = node_collection.collection.GSystem()
cu_new.member_of.append(cu_gst._id)
# set name
cu_new.name = unit_name.strip()
cu_new.modified_by = int(request.user.id)
cu_new.created_by = int(request.user.id)
cu_new.contributors.append(int(request.user.id))
cu_new.prior_node.append(css_node._id)
cu_new.save()
response_dict["create_new_unit"] = True
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'name': unit_name }}, upsert=False, multi=False)
if cu_new._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_new._id }}, upsert=False, multi=False)
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'collection_set':list_of_res_ids}},upsert=False,multi=False)
cu_new.reload()
response_dict["success"] = True
response_dict["cu_new_id"] = str(cu_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def create_edit_unit(request, group_id):
'''
Accepts:
* ObjectId of unit node if exists
* ObjectId of CourseSubSection node
Actions:
* Creates/Updates Unit node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get("css_node_id", '')
unit_node_id = request.POST.get("unit_node_id", '')
unit_name = request.POST.get("unit_name", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
cu_node = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_node = None
if cu_node is None:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_node = node_collection.collection.GSystem()
cu_node.member_of.append(cu_gst._id)
# set name
cu_node.name = unit_name.strip()
cu_node.modified_by = int(request.user.id)
cu_node.created_by = int(request.user.id)
cu_node.contributors.append(int(request.user.id))
cu_node.prior_node.append(css_node._id)
cu_node.save()
response_dict["unit_node_id"] = str(cu_node._id)
node_collection.collection.update({'_id': cu_node._id}, {'$set': {'name': unit_name}}, upsert=False, multi=False)
if cu_node._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_node._id}}, upsert=False, multi=False)
return HttpResponse(json.dumps(response_dict))
@login_required
def delete_from_course_structure(request, group_id):
'''
Accepts:
* ObjectId of node that is to be deleted.
It can be CourseSection/CourseSubSection/CourseUnit
Actions:
* Deletes the received node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
del_stat = False
if request.is_ajax() and request.method == "POST":
oid = request.POST.get("oid", '')
del_stat = delete_item(oid)
if del_stat:
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
def delete_item(item):
    # Recursively delete a course-structure node: a CourseUnit is deleted
    # as-is, while a CourseSection/CourseSubSection first has its
    # collection_set children deleted.
    node_item = node_collection.one({'_id': ObjectId(item)})
    if u"CourseUnit" not in node_item.member_of_names_list and node_item.collection_set:
        for each in node_item.collection_set:
            delete_item(each)
    del_status, del_status_msg = delete_node(
        node_id=node_item._id,
        deletion_type=0
    )
    return del_status
@login_required
def publish_course(request, group_id):
if request.is_ajax() and request.method == "POST":
try:
node_id = request.POST.get("node_id", "")
node = node_collection.one({'_id': ObjectId(node_id)})
node.status = unicode("PUBLISHED")
node.modified_by = int(request.user.id)
node.save()
except:
return HttpResponse("Fail")
return HttpResponse("Success")
| sunnychaudhari/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/course.py | Python | agpl-3.0 | 54,618 | 0.005493 |
# Author: seedboy
# URL: https://github.com/seedboy
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
from sickbeard.providers import generic
from sickbeard import logger
from sickbeard import tvcache
from sickrage.helper.exceptions import AuthException
from sickbeard.bs4_parser import BS4Parser
class IPTorrentsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "IPTorrents")
self.supportsBacklog = True
self.username = None
self.password = None
self.ratio = None
self.freeleech = False
self.minseed = None
self.minleech = None
self.cache = IPTorrentsCache(self)
self.urls = {'base_url': 'https://iptorrents.eu',
'login': 'https://iptorrents.eu/torrents/',
'search': 'https://iptorrents.eu/t?%s%s&q=%s&qf=#torrents'}
self.url = self.urls['base_url']
self.categories = '73=&60='
def isEnabled(self):
return self.enabled
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password,
'login': 'submit'}
self.getURL(self.urls['login'], timeout=30)
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('tries left', response):
logger.log(u"You tried too often, please try again after 1 hour! Disable IPTorrents for at least 1 hour", logger.WARNING)
return False
if re.search('Password not correct', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
freeleech = '&free=on' if self.freeleech else ''
if not self._doLogin():
return results
for mode in search_params.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
# URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
searchURL = self.urls['search'] % (self.categories, freeleech, search_string)
searchURL += ';o=seeders' if mode != 'RSS' else ''
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
try:
data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
if not html:
logger.log("No data returned from provider", logger.DEBUG)
continue
if html.find(text='No Torrents Found!'):
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrent_table = html.find('table', attrs={'class': 'torrents'})
torrents = torrent_table.find_all('tr') if torrent_table else []
                        # Continue only if at least one release was found (row 0 is the table header)
if len(torrents) < 2:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for result in torrents[1:]:
try:
title = result.find_all('td')[1].find('a').text
download_url = self.urls['base_url'] + result.find_all('td')[3].find('a')['href']
size = self._convertSize(result.find_all('td')[5].text)
seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).text)
leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).text)
except (AttributeError, TypeError, KeyError):
continue
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
                except Exception as e:
                    logger.log(u"Failed parsing provider. Error: %r" % str(e), logger.ERROR)
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
    def _convertSize(self, size):
        # Parse a size string such as "1.5 GB" into a byte count.
        size, modifier = size.split(' ')
        size = float(size)
        if modifier == 'KB':
            size = size * 1024
        elif modifier == 'MB':
            size = size * 1024**2
        elif modifier == 'GB':
            size = size * 1024**3
        elif modifier == 'TB':
            size = size * 1024**4
        return int(size)
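    # Illustrative example (assumes the site reports sizes like "1.5 GB"):
    #     self._convertSize("1.5 GB") == 1610612736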
class IPTorrentsCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# Only poll IPTorrents every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = IPTorrentsProvider()
| markrawlingson/SickRage | sickbeard/providers/iptorrents.py | Python | gpl-3.0 | 7,350 | 0.00449 |
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # handle NumPy documentation formatted docstrings.
'sphinx_plotly_directive', # For visualize plot result
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/#downloading
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/building-spark.html
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def setup(app):
# The app.add_javascript() is deprecated.
getattr(app, "add_js_file", getattr(app, "add_javascript"))('copybutton.js')
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
| cloud-fan/spark | python/docs/source/conf.py | Python | apache-2.0 | 12,894 | 0.005584 |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/PathList.py 2013/03/03 09:48:35 garyo"
__doc__ = """SCons.PathList
A module for handling lists of directory paths (the sort of things
that get set as CPPPATH, LIBPATH, etc.) with as much caching of data and
efficiency as we can while still keeping the evaluation delayed so that we
Do the Right Thing (almost) regardless of how the variable is specified.
"""
import os
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0 # string with no '$'
TYPE_STRING_SUBST = 1 # string containing '$'
TYPE_OBJECT = 2 # other object
def node_conv(obj):
"""
This is the "string conversion" routine that we have our substitutions
use to return Nodes, not strings. This relies on the fact that an
EntryProxy object has a get() method that returns the underlying
Node that it wraps, which is a bit of architectural dependence
that we might need to break or modify in the future in response to
additional requirements.
"""
try:
get = obj.get
except AttributeError:
if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence( obj ):
result = obj
else:
result = str(obj)
else:
result = get()
return result
class _PathList(object):
"""
An actual PathList object.
"""
def __init__(self, pathlist):
"""
Initializes a PathList object, canonicalizing the input and
pre-processing it for quicker substitution later.
The stored representation of the PathList is a list of tuples
containing (type, value), where the "type" is one of the TYPE_*
variables defined above. We distinguish between:
strings that contain no '$' and therefore need no
delayed-evaluation string substitution (we expect that there
will be many of these and that we therefore get a pretty
big win from avoiding string substitution)
strings that contain '$' and therefore need substitution
(the hard case is things like '${TARGET.dir}/include',
which require re-evaluation for every target + source)
other objects (which may be something like an EntryProxy
that needs a method called to return a Node)
Pre-identifying the type of each element in the PathList up-front
and storing the type in the list of tuples is intended to reduce
the amount of calculation when we actually do the substitution
over and over for each target.
"""
if SCons.Util.is_String(pathlist):
pathlist = pathlist.split(os.pathsep)
elif not SCons.Util.is_Sequence(pathlist):
pathlist = [pathlist]
pl = []
for p in pathlist:
try:
index = p.find('$')
except (AttributeError, TypeError):
type = TYPE_OBJECT
else:
if index == -1:
type = TYPE_STRING_NO_SUBST
else:
type = TYPE_STRING_SUBST
pl.append((type, p))
self.pathlist = tuple(pl)
def __len__(self): return len(self.pathlist)
def __getitem__(self, i): return self.pathlist[i]
def subst_path(self, env, target, source):
"""
Performs construction variable substitution on a pre-digested
PathList for a specific target and source.
"""
result = []
for type, value in self.pathlist:
if type == TYPE_STRING_SUBST:
value = env.subst(value, target=target, source=source,
conv=node_conv)
if SCons.Util.is_Sequence(value):
result.extend(SCons.Util.flatten(value))
elif value:
result.append(value)
elif type == TYPE_OBJECT:
value = node_conv(value)
if value:
result.append(value)
elif value:
result.append(value)
return tuple(result)
class PathListCache(object):
"""
A class to handle caching of PathList lookups.
This class gets instantiated once and then deleted from the namespace,
so it's used as a Singleton (although we don't enforce that in the
usual Pythonic ways). We could have just made the cache a dictionary
in the module namespace, but putting it in this class allows us to
use the same Memoizer pattern that we use elsewhere to count cache
hits and misses, which is very valuable.
Lookup keys in the cache are computed by the _PathList_key() method.
Cache lookup should be quick, so we don't spend cycles canonicalizing
all forms of the same lookup key. For example, 'x:y' and ['x',
'y'] logically represent the same list, but we don't bother to
split string representations and treat those two equivalently.
    (Note, however, that we do treat lists and tuples the same.)
The main type of duplication we're trying to catch will come from
looking up the same path list from two different clones of the
same construction environment. That is, given
env2 = env1.Clone()
both env1 and env2 will have the same CPPPATH value, and we can
cheaply avoid re-parsing both values of CPPPATH by using the
common value from this cache.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self):
self._memo = {}
def _PathList_key(self, pathlist):
"""
Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
        'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir2'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path.
"""
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist
memoizer_counters.append(SCons.Memoize.CountDict('PathList', _PathList_key))
def PathList(self, pathlist):
"""
Returns the cached _PathList object for the specified pathlist,
creating and caching a new object as necessary.
"""
pathlist = self._PathList_key(pathlist)
try:
memo_dict = self._memo['PathList']
except KeyError:
memo_dict = {}
self._memo['PathList'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
result = _PathList(pathlist)
memo_dict[pathlist] = result
return result
PathList = PathListCache().PathList
del PathListCache
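# Minimal usage sketch (illustrative only, not part of the module): the
# module-level PathList() factory canonicalizes and caches a path list once,
# and subst_path() expands it later for a concrete target/source pair:
#
#     pl = PathList(['include', '${TARGET.dir}/include'])
#     paths = pl.subst_path(env, target, source)
#
# where 'env', 'target' and 'source' are assumed to be a construction
# environment and Node objects supplied by the calling SCons code.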
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| aubreyrjones/libesp | scons_local/scons-local-2.3.0/SCons/PathList.py | Python | mit | 8,536 | 0.000937 |
# Copyright 2011 David Malcolm <dmalcolm@redhat.com>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# Verify examining details of functions
import gcc
from gccutils import pprint
def on_pass_execution(p, fn):
if p.name == '*warn_function_return':
assert isinstance(fn, gcc.Function)
print('fn: %r' % fn)
assert isinstance(fn.decl, gcc.FunctionDecl)
print('fn.decl.name: %r' % fn.decl.name)
assert isinstance(fn.decl, gcc.FunctionDecl)
#print(fn.decl.type)
#print(fn.decl.type.argument_types)
#pprint(fn.decl)
print('len(fn.local_decls): %r' % len(fn.local_decls))
for i, local in enumerate(fn.local_decls):
print('local_decls[%i]' % i)
print(' type(local): %r' % type(local))
print(' local.name: %r' % local.name)
# The "initial" only seems to be present for static variables
# with initializers. Other variables seem to get initialized
# in explicit gimple statements (see below)
if local.initial:
print(' local.initial.constant: %r' % local.initial.constant)
else:
print(' local.initial: %r' % local.initial)
print(' str(local.type): %r' % str(local.type))
#pprint(local)
#local.debug()
print('fn.funcdef_no: %r' % fn.funcdef_no)
print('fn.start: %r' % fn.start)
print('fn.end: %r' % fn.end)
assert isinstance(fn.cfg, gcc.Cfg) # None for some early passes
assert len(fn.cfg.basic_blocks) == 3
assert fn.cfg.basic_blocks[0] == fn.cfg.entry
assert fn.cfg.basic_blocks[1] == fn.cfg.exit
bb = fn.cfg.basic_blocks[2]
for i,stmt in enumerate(bb.gimple):
print('gimple[%i]:' % i)
print(' str(stmt): %r' % str(stmt))
print(' repr(stmt): %r' % repr(stmt))
if isinstance(stmt, gcc.GimpleAssign):
print(' str(stmt.lhs): %r' % str(stmt.lhs))
print(' [str(stmt.rhs)]: %r' % [str(item) for item in stmt.rhs])
#print(dir(stmt))
#pprint(stmt)
print('fn.decl.arguments: %r' % fn.decl.arguments)
for i, arg in enumerate(fn.decl.arguments):
print(' arg[%i]:' % i)
print(' arg.name: %r' % arg.name)
print(' str(arg.type): %r' % str(arg.type))
print('type(fn.decl.result): %r' % type(fn.decl.result))
print(' str(fn.decl.result.type): %r' % str(fn.decl.result.type))
gcc.register_callback(gcc.PLUGIN_PASS_EXECUTION,
on_pass_execution)
| ruediger/gcc-python-plugin | tests/plugin/functions/script.py | Python | gpl-3.0 | 3,300 | 0.003636 |
#!/usr/bin/python
import os
import threading
import time
import Queue
import signal
import subprocess
import collections
from json_get import generate_list
q = Queue.Queue()
ls = collections.deque( generate_list())
def showstuff():
    """Show the current image with feh; advance on 'next', quit on 'stop'."""
    while True:
        # Display the image at the head of the deque.
        sb = subprocess.Popen(["feh", "-Z", "-g", "800x400", ls[0]])
        while True:
            a = q.get()
            print a
            if a == "stop":
                sb.terminate()
                exit()
            elif a == "next":
                # Rotate the deque so the next image becomes ls[0].
                ls.rotate(1)
                sb.terminate()
                break
def main():
    showOff = threading.Thread(target=showstuff)
    showOff.start()
    # Advance the slideshow six times, then tell the viewer thread to stop.
    for i in range(6):
        time.sleep(5)
        q.put("next")
    time.sleep(2)
    q.put("stop")
main()
| Programvareverkstedet/pensieve | pensieve.py | Python | gpl-2.0 | 789 | 0.017744 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class MD2M(MEoS):
"""Multiparameter equation of state for decamethyltetrasiloxane"""
name = "decamethyltetrasiloxane"
CASNumber = "141-62-8"
formula = "C10H30Si4O3"
synonym = "MD2M"
rhoc = unidades.Density(284.1716396703609)
Tc = unidades.Temperature(599.40)
Pc = unidades.Pressure(1227.0, "kPa")
M = 310.685 # g/mol
Tt = unidades.Temperature(205.2)
Tb = unidades.Temperature(467.51)
f_acent = 0.668
momentoDipolar = unidades.DipoleMoment(1.12, "Debye")
id = 39
# id = 1837
CP1 = {"ao": 331.9,
"an": [], "pow": [],
"ao_exp": [], "exp": [],
"ao_hyp": [329620742.8, 0, 2556558319.0, 0],
"hyp": [795.1, 0, 1813.8, 0]}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for MD2M of Colonna et al. (2006).",
"__doi__": {"autor": "Colonna, P., Nannan, N.R., and Guardone, A.",
"title": "Multiparameter equations of state for siloxanes: [(CH3)3-Si-O1/2]2-[O-Si-(CH3)2]i=1,…,3, and [O-Si-(CH3)2]6",
"ref": "Fluid Phase Equilibria 263:115-130, 2008",
"doi": "10.1016/j.fluid.2007.10.001"},
"R": 8.314472,
"cp": CP1,
"ref": "NBP",
"Tmin": Tt, "Tmax": 673.0, "Pmax": 30000.0, "rhomax": 3.033,
"Pmin": 0.0000005, "rhomin": 3.032,
"nr1": [1.33840331, -2.62939393, 0.4398383, -0.53496715, 0.1818844,
0.40774609e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [1.13444506, 0.5774631e-1, -0.5917498, -0.11020225,
-0.34942635e-1, 0.7646298e-2],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.0],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
eq = helmholtz1,
_vapor_Pressure = {
"eq": 5,
"ao": [-0.10029e2, 0.44434e1, -0.36753e1, -0.68925e1, -0.32211e1],
"exp": [1.0, 1.5, 2.06, 3.5, 10.0]}
_liquid_Density = {
"eq": 1,
"ao": [0.12608e2, -0.32120e2, 0.33559e2, -0.11695e2, 0.76192],
"exp": [0.48, 0.64, 0.8, 1.0, 2.6]}
_vapor_Density = {
"eq": 3,
"ao": [-0.24684e1, -0.71262e1, -0.27601e2, -0.49458e2, -0.24106e2,
-0.19370e3],
"exp": [0.376, 0.94, 2.9, 5.9, 6.2, 13.0]}
| edusegzy/pychemqt | lib/mEoS/MD2M.py | Python | gpl-3.0 | 2,498 | 0.003205 |
#!/usr/bin/env python
import subprocess
import time
from format import *
class Video():
""" Class to represent a Youtube video."""
def __init__(self, data):
self.id = data['id']
self.title = data['title']
self.description = data['description']
self.user = data['uploader']
self.uploaded = time.strptime(data['uploaded'].replace(".000Z", "").replace("T", " "), "%Y-%m-%d %H:%M:%S")
self.views = int(data['viewCount']) if 'viewCount' in data else 0
self.rating = float(data['rating']) if 'rating' in data else 0
self.likes = int(data['likeCount']) if 'likeCount' in data else 0
self.dislikes = int(data['ratingCount']) - self.likes if 'ratingCount' in data else 0
self.comment_count = int(data['commentCount']) if 'commentCount' in data else 0
self.length = int(data['duration'])
def format_title_desc(self, number):
"""Formats information about the title and description of the video."""
title = str(number) + '. ' + self.title
desc = self.description
return (title, desc)
def format_info(self):
"""Formats other information about the video."""
user = ' ' + quick_fit_string(self.user, 21)
info1 = ' v:' + format_int(self.views, 4) + \
' t:' + quick_fit_string(format_time(self.length), 8)
        info2 = ' l:' + format_int(self.likes, 4) + \
                ' d:' + format_int(self.dislikes, 4) + \
                ' r:' + quick_fit_string(str(self.rating), 4)  # built but not part of the returned tuple
info3 = ' r:' + quick_fit_string(str(self.rating), 4) + \
' u:' + time.strftime('%d/%m/%y', self.uploaded)
return (user, info1, info3)
def play(self, player, args):
"""Opens the video in a video player"""
url = 'https://www.youtube.com/watch?v=' + self.id
player = subprocess.Popen([player] + args.split(' ') + [url], stderr=subprocess.DEVNULL)
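# Illustrative usage sketch (the dict keys mirror what __init__ reads; the
# values are made up and would normally come from the parsed YouTube API data):
#
#     data = {'id': 'abc123', 'title': 'Demo', 'description': '', 'uploader': 'me',
#             'uploaded': '2014-01-01T00:00:00.000Z', 'duration': 90}
#     video = Video(data)
#     video.play('mpv', '--really-quiet')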
| enchuu/yaytp | video.py | Python | mit | 1,966 | 0.006104 |
"""Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
# `make regen-importlib` followed by `make` in order to get the frozen version
# of the module updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module in the early
# stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY
+ _CASE_INSENSITIVE_PLATFORMS_STR_KEY)
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY):
key = 'PYTHONCASEOK'
else:
key = b'PYTHONCASEOK'
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return key in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
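# Illustrative note (not part of the original module): _w_long() and _r_long()
# are inverses for 32-bit values, e.g. _r_long(_w_long(3147)) == 3147; .pyc
# file headers rely on this 4-byte little-endian encoding.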
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
_code_type = type(_write_atomic.__code__)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT
#3021)
# Python 3.1a1: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD #2183)
# Python 3.1a1: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
#4715)
# Python 3.2a1: 3160 (add SETUP_WITH #6101)
# tag: cpython-32
# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225)
# tag: cpython-32
# Python 3.2a3 3180 (add DELETE_DEREF #4617)
# Python 3.3a1 3190 (__class__ super closure changed)
# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448)
# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645)
# Python 3.3a2 3220 (changed PEP 380 implementation #14230)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults #16967)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars #17853)
# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation #19301)
# Python 3.4a4 3300 (more changes to __qualname__ computation #19301)
# Python 3.4rc2 3310 (alter __qualname__ computation #20625)
# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176)
# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292)
# Python 3.5b2 3340 (fix dictionary display evaluation order #11205)
# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400)
# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286)
# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483)
# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107)
# Python 3.6a2 3370 (16 bit wordcode #26647)
# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140)
# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE
# #27095)
# Python 3.6b1 3373 (add BUILD_STRING opcode #27078)
# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes
# #27985)
# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL
#27213)
# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722)
# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257)
# Python 3.6rc1 3379 (more thorough __class__ validation #23722)
# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110)
# Python 3.7a2 3391 (update GET_AITER #31709)
# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650)
# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550)
# Python 3.7b5 3394 (restored docstring as the first stmt in the body;
# this might have affected the first line number #32911)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
#
# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
# in PC/launcher.c must also be updated.
MAGIC_NUMBER = (3394).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
_PYCACHE = '__pycache__'
_OPT = 'opt-'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
BYTECODE_SUFFIXES = ['.pyc']
# Deprecated.
DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES
def cache_from_source(path, debug_override=None, *, optimization=None):
"""Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
The 'optimization' parameter controls the presumed optimization level of
the bytecode file. If 'optimization' is not None, the string representation
of the argument is taken and verified to be alphanumeric (else ValueError
is raised).
The debug_override parameter is deprecated. If debug_override is not None,
a True value is the same as setting 'optimization' to the empty string
while a False value is equivalent to setting 'optimization' to '1'.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if debug_override is not None:
_warnings.warn('the debug_override parameter is deprecated; use '
"'optimization' instead", DeprecationWarning)
if optimization is not None:
message = 'debug_override or optimization must be set to None'
raise TypeError(message)
optimization = '' if debug_override else 1
path = _os.fspath(path)
head, tail = _path_split(path)
base, sep, rest = tail.rpartition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
almost_filename = ''.join([(base if base else rest), sep, tag])
if optimization is None:
if sys.flags.optimize == 0:
optimization = ''
else:
optimization = sys.flags.optimize
optimization = str(optimization)
if optimization != '':
if not optimization.isalnum():
raise ValueError('{!r} is not alphanumeric'.format(optimization))
almost_filename = '{}.{}{}'.format(almost_filename, _OPT, optimization)
return _path_join(head, _PYCACHE, almost_filename + BYTECODE_SUFFIXES[0])
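# Example of the PEP 3147/488 layout produced above, via the public wrapper in
# importlib.util (the '/srv/app' path is hypothetical and the 'cpython-37'
# cache tag assumes CPython 3.7):
#
#   >>> import importlib.util
#   >>> importlib.util.cache_from_source('/srv/app/mod.py')
#   '/srv/app/__pycache__/mod.cpython-37.pyc'
#   >>> importlib.util.cache_from_source('/srv/app/mod.py', optimization='2')
#   '/srv/app/__pycache__/mod.cpython-37.opt-2.pyc'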
def source_from_cache(path):
"""Given the path to a .pyc. file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147/488 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
path = _os.fspath(path)
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
dot_count = pycache_filename.count('.')
if dot_count not in {2, 3}:
raise ValueError('expected only 2 or 3 dots in '
'{!r}'.format(pycache_filename))
elif dot_count == 3:
optimization = pycache_filename.rsplit('.', 2)[-2]
if not optimization.startswith(_OPT):
raise ValueError("optimization portion of filename does not start "
"with {!r}".format(_OPT))
opt_level = optimization[len(_OPT):]
if not opt_level.isalnum():
raise ValueError("optimization level {!r} is not an alphanumeric "
"value".format(optimization))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
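# The inverse mapping, again via importlib.util (hypothetical path); a path
# that is not located under a __pycache__ directory raises ValueError:
#
#   >>> import importlib.util
#   >>> importlib.util.source_from_cache(
#   ...     '/srv/app/__pycache__/mod.cpython-37.opt-2.pyc')
#   '/srv/app/mod.py'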
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _get_cached(filename):
if filename.endswith(tuple(SOURCE_SUFFIXES)):
try:
return cache_from_source(filename)
except NotImplementedError:
pass
elif filename.endswith(tuple(BYTECODE_SUFFIXES)):
return filename
else:
return None
def _calc_mode(path):
"""Calculate the mode permissions for a bytecode file."""
try:
mode = _path_stat(path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return mode
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
The first argument (self) must define _name which the second argument is
compared against. If the comparison fails then ImportError is raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader for %s cannot handle %s' %
(self.name, name), name=name)
return method(self, name, *args, **kwargs)
try:
_wrap = _bootstrap._wrap
except NameError:
# XXX yuck
def _wrap(new, old):
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
def _classify_pyc(data, name, exc_details):
"""Perform basic validity checking of a pyc header and return the flags field,
which determines how the pyc should be further validated against the source.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required, though.)
*name* is the name of the module being imported. It is used for logging.
    *exc_details* is a dictionary passed to ImportError if it is raised for
improved debugging.
ImportError is raised when the magic number is incorrect or when the flags
field is invalid. EOFError is raised when the data is found to be truncated.
"""
magic = data[:4]
if magic != MAGIC_NUMBER:
message = f'bad magic number in {name!r}: {magic!r}'
_bootstrap._verbose_message('{}', message)
raise ImportError(message, **exc_details)
if len(data) < 16:
message = f'reached EOF while reading pyc header of {name!r}'
_bootstrap._verbose_message('{}', message)
raise EOFError(message)
flags = _r_long(data[4:8])
# Only the first two flags are defined.
if flags & ~0b11:
message = f'invalid flags {flags!r} in {name!r}'
raise ImportError(message, **exc_details)
return flags
def _validate_timestamp_pyc(data, source_mtime, source_size, name,
exc_details):
"""Validate a pyc against the source last-modified time.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required.)
*source_mtime* is the last modified timestamp of the source file.
*source_size* is None or the size of the source file in bytes.
*name* is the name of the module being imported. It is used for logging.
    *exc_details* is a dictionary passed to ImportError if it is raised for
improved debugging.
An ImportError is raised if the bytecode is stale.
"""
if _r_long(data[8:12]) != (source_mtime & 0xFFFFFFFF):
message = f'bytecode is stale for {name!r}'
_bootstrap._verbose_message('{}', message)
raise ImportError(message, **exc_details)
if (source_size is not None and
_r_long(data[12:16]) != (source_size & 0xFFFFFFFF)):
raise ImportError(f'bytecode is stale for {name!r}', **exc_details)
def _validate_hash_pyc(data, source_hash, name, exc_details):
"""Validate a hash-based pyc by checking the real source hash against the one in
the pyc header.
*data* is the contents of the pyc file. (Only the first 16 bytes are
required.)
*source_hash* is the importlib.util.source_hash() of the source file.
*name* is the name of the module being imported. It is used for logging.
    *exc_details* is a dictionary passed to ImportError if it is raised for
improved debugging.
An ImportError is raised if the bytecode is stale.
"""
if data[8:16] != source_hash:
raise ImportError(
f'hash in bytecode doesn\'t match hash of source {name!r}',
**exc_details,
)
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as found in a pyc."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_bootstrap._verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path)
def _code_to_timestamp_pyc(code, mtime=0, source_size=0):
"Produce the data for a timestamp-based pyc."
data = bytearray(MAGIC_NUMBER)
data.extend(_w_long(0))
data.extend(_w_long(mtime))
data.extend(_w_long(source_size))
data.extend(marshal.dumps(code))
return data
def _code_to_hash_pyc(code, source_hash, checked=True):
"Produce the data for a hash-based pyc."
data = bytearray(MAGIC_NUMBER)
flags = 0b1 | checked << 1
data.extend(_w_long(flags))
assert len(source_hash) == 8
data.extend(source_hash)
data.extend(marshal.dumps(code))
return data
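# The 16-byte header written by the two helpers above (PEP 552) is laid out as:
#
#   bytes 0-3    MAGIC_NUMBER
#   bytes 4-7    flags (bit 0: hash-based pyc, bit 1: check_source)
#   bytes 8-15   timestamp pyc: source mtime (4 bytes) + source size (4 bytes)
#                hash-based pyc: 8-byte hash of the source
#
# A hedged sketch of building a checked, hash-based pyc outside of bootstrap
# (importlib.util.source_hash() is the public wrapper around _imp.source_hash):
#
#   >>> import importlib.util
#   >>> code = compile('x = 1\n', 'mod.py', 'exec')
#   >>> payload = _code_to_hash_pyc(code, importlib.util.source_hash(b'x = 1\n'))
#   >>> bytes(payload[:4]) == MAGIC_NUMBER
#   True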
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
import tokenize # To avoid bootstrap issues.
source_bytes_readline = _io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
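# Example: the coding cookie is honoured and newlines are normalised to '\n'
# (illustrative only; such code cannot execute while this module bootstraps):
#
#   >>> decode_source(b'# coding: utf-8\r\nx = 1\r\n')
#   '# coding: utf-8\nx = 1\n'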
# Module specifications #######################################################
_POPULATE = object()
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
else:
location = _os.fspath(location)
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = _bootstrap.ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
# Set submodule_search_paths appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
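# Hedged example of the spec this builds for a plain source file (the path is
# hypothetical and does not need to exist for the spec to be constructed):
#
#   >>> spec = spec_from_file_location('mod', '/srv/app/mod.py')
#   >>> spec.name, type(spec.loader).__name__, spec.origin
#   ('mod', 'SourceFileLoader', '/srv/app/mod.py')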
# Loaders #####################################################################
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry."""
REGISTRY_KEY = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}')
REGISTRY_KEY_DEBUG = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}\\Debug')
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except OSError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version='%d.%d' % sys.version_info[:2])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@classmethod
def find_spec(cls, fullname, path=None, target=None):
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_path_stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
spec = _bootstrap.spec_from_loader(fullname,
loader(fullname, filepath),
origin=filepath)
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry.
        This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is not None:
return spec.loader
else:
return None
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def create_module(self, spec):
"""Use default semantics for module creation."""
def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_bootstrap._call_with_frames_removed(exec, code, module.__dict__)
def load_module(self, fullname):
"""This module is deprecated."""
return _bootstrap._load_module_shim(self, fullname)
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
Raises OSError when the path cannot be handled.
"""
raise OSError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises OSError when the path cannot be handled.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError('source not available through get_data()',
name=fullname) from exc
return decode_source(source_bytes)
def source_to_code(self, data, path, *, _optimize=-1):
"""Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
return _bootstrap._call_with_frames_removed(compile, data, path, 'exec',
dont_inherit=True, optimize=_optimize)
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
source_bytes = None
source_hash = None
hash_based = False
check_source = True
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except OSError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
exc_details = {
'name': fullname,
'path': bytecode_path,
}
try:
flags = _classify_pyc(data, fullname, exc_details)
bytes_data = memoryview(data)[16:]
hash_based = flags & 0b1 != 0
if hash_based:
check_source = flags & 0b10 != 0
if (_imp.check_hash_based_pycs != 'never' and
(check_source or
_imp.check_hash_based_pycs == 'always')):
source_bytes = self.get_data(source_path)
source_hash = _imp.source_hash(
_RAW_MAGIC_NUMBER,
source_bytes,
)
_validate_hash_pyc(data, source_hash, fullname,
exc_details)
else:
_validate_timestamp_pyc(
data,
source_mtime,
st['size'],
fullname,
exc_details,
)
except (ImportError, EOFError):
pass
else:
_bootstrap._verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
if source_bytes is None:
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_bootstrap._verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
if hash_based:
if source_hash is None:
source_hash = _imp.source_hash(source_bytes)
data = _code_to_hash_pyc(code_object, source_hash, check_source)
else:
data = _code_to_timestamp_pyc(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_bootstrap._verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
# The only reason for this method is for the name check.
# Issue #14857: Avoid the zero-argument form of super so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
# ResourceReader ABC API.
@_check_name
def get_resource_reader(self, module):
if self.is_package(module):
return self
return None
def open_resource(self, resource):
path = _path_join(_path_split(self.path)[0], resource)
return _io.FileIO(path, 'r')
def resource_path(self, resource):
if not self.is_resource(resource):
raise FileNotFoundError
path = _path_join(_path_split(self.path)[0], resource)
return path
def is_resource(self, name):
if path_sep in name:
return False
path = _path_join(_path_split(self.path)[0], name)
return _path_isfile(path)
def contents(self):
return iter(_os.listdir(_path_split(self.path)[0]))
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_bootstrap._verbose_message('could not create {!r}: {!r}',
parent, exc)
return
try:
_write_atomic(path, data, _mode)
_bootstrap._verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_bootstrap._verbose_message('could not create {!r}: {!r}', path,
exc)
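# A hedged sketch of driving SourceFileLoader by hand; the normal entry points
# are the importlib.util helpers, and '/srv/app/cfg.py' is a hypothetical path:
#
#   >>> import importlib.util
#   >>> spec = importlib.util.spec_from_file_location('cfg', '/srv/app/cfg.py')
#   >>> mod = importlib.util.module_from_spec(spec)
#   >>> spec.loader.exec_module(mod)   # reads, compiles and (if possible) caches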
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
# Call _classify_pyc to do basic validation of the pyc but ignore the
# result. There's no source to check against.
exc_details = {
'name': fullname,
'path': path,
}
_classify_pyc(data, fullname, exc_details)
return _compile_bytecode(
memoryview(data)[16:],
name=fullname,
bytecode_path=path,
)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader(FileLoader, _LoaderBasics):
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
def create_module(self, spec):
"""Create an unitialized extension module"""
module = _bootstrap._call_with_frames_removed(
_imp.create_dynamic, spec)
_bootstrap._verbose_message('extension module {!r} loaded from {!r}',
spec.name, self.path)
return module
def exec_module(self, module):
"""Initialize an extension module"""
_bootstrap._call_with_frames_removed(_imp.exec_dynamic, module)
_bootstrap._verbose_message('extension module {!r} executed from {!r}',
self.name, self.path)
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
spec = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if spec is not None and spec.loader is None:
if spec.submodule_search_locations:
self._path = spec.submodule_search_locations
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __setitem__(self, index, path):
self._path[index] = path
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return '_NamespacePath({!r})'.format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
# We use this exclusively in module_from_spec() for backward-compatibility.
class _NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (namespace)>'.format(module.__name__)
def is_package(self, fullname):
return True
def get_source(self, fullname):
return ''
def get_code(self, fullname):
return compile('', '<string>', 'exec', dont_inherit=True)
def create_module(self, spec):
"""Use default semantics for module creation."""
def exec_module(self, module):
pass
def load_module(self, fullname):
"""Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
# The import system never calls this method.
_bootstrap._verbose_message('namespace module loaded with path {!r}',
self._path)
return _bootstrap._load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_cache (where implemented)."""
for name, finder in list(sys.path_importer_cache.items()):
if finder is None:
del sys.path_importer_cache[name]
elif hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sys.path_hooks for a finder for 'path'."""
if sys.path_hooks is not None and not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
try:
path = _os.getcwd()
except FileNotFoundError:
# Don't cache the failure as the cwd can easily change to
# a valid directory later on.
return None
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return _bootstrap.spec_from_loader(fullname, loader)
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""Try to find a spec for 'fullname' on sys.path or 'path'.
The search is based on sys.path_hooks and sys.path_importer_cache.
"""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a spec which
# can create the namespace package.
spec.origin = None
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
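# Example: PathFinder is the sys.meta_path entry behind ordinary filesystem
# imports, so querying it directly mirrors what `import json` does (the origin
# shown is illustrative and varies by installation):
#
#   >>> PathFinder.find_spec('json').origin
#   '/usr/lib/python3.7/json/__init__.py'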
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a spec for the specified module.
Returns the matching spec, or None if not found.
"""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
        # Check whether a file with a proper suffix exists.
for suffix, loader_class in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_bootstrap._verbose_message('trying {}', full_path, verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path,
None, target)
if is_namespace:
_bootstrap._verbose_message('possible namespace for {}', base_path)
spec = _bootstrap.ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
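# Hedged example of wiring a FileFinder by hand for one directory (normally
# _install() registers the hook on sys.path_hooks; '/srv/app' is hypothetical):
#
#   >>> hook = FileFinder.path_hook((SourceFileLoader, SOURCE_SUFFIXES))
#   >>> finder = hook('/srv/app')       # ImportError if not a directory
#   >>> finder.find_spec('mod')         # ModuleSpec for /srv/app/mod.py, or None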
# Import setup ###############################################################
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
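# On a typical CPython 3.7 Linux build the list above looks roughly like this
# (extension suffixes vary by platform and build):
#
#   [(ExtensionFileLoader, ['.cpython-37m-x86_64-linux-gnu.so', '.abi3.so', '.so']),
#    (SourceFileLoader, ['.py']),
#    (SourcelessFileLoader, ['.pyc'])]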
def _setup(_bootstrap_module):
"""Setup the path-based importers for importlib by importing needed
built-in modules and injecting them into the global namespace.
Other components are extracted from the core bootstrap module.
"""
global sys, _imp, _bootstrap
_bootstrap = _bootstrap_module
sys = _bootstrap.sys
_imp = _bootstrap._imp
# Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
if builtin_name not in sys.modules:
builtin_module = _bootstrap._builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = _bootstrap._builtin_from_name(builtin_os)
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
# Directly load the _thread module (needed during bootstrap).
thread_module = _bootstrap._builtin_from_name('_thread')
setattr(self_module, '_thread', thread_module)
# Directly load the _weakref module (needed during bootstrap).
weakref_module = _bootstrap._builtin_from_name('_weakref')
setattr(self_module, '_weakref', weakref_module)
# Directly load the winreg module (needed during bootstrap).
if builtin_os == 'nt':
winreg_module = _bootstrap._builtin_from_name('winreg')
setattr(self_module, '_winreg', winreg_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(_bootstrap_module):
"""Install the path-based import components."""
_setup(_bootstrap_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(PathFinder)
| prefetchnta/questlab | bin/x64bin/python/37/Lib/importlib/_bootstrap_external.py | Python | lgpl-2.1 | 60,574 | 0.000528 |