| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides some classes for setting up (partially-populated)
# homeservers; either as a full homeserver for a real application, or a small
# partial one for unit test mocking.
# Imports required for the default HomeServer() implementation
import logging
from twisted.enterprise import adbapi
from twisted.web.client import BrowserLikePolicyForHTTPS
from synapse.api.auth import Auth
from synapse.api.filtering import Filtering
from synapse.api.ratelimiting import Ratelimiter
from synapse.appservice.api import ApplicationServiceApi
from synapse.appservice.scheduler import ApplicationServiceScheduler
from synapse.crypto.keyring import Keyring
from synapse.events.builder import EventBuilderFactory
from synapse.federation import initialize_http_replication
from synapse.federation.send_queue import FederationRemoteSendQueue
from synapse.federation.transport.client import TransportLayerClient
from synapse.federation.transaction_queue import TransactionQueue
from synapse.handlers import Handlers
from synapse.handlers.appservice import ApplicationServicesHandler
from synapse.handlers.auth import AuthHandler, MacaroonGeneartor
from synapse.handlers.devicemessage import DeviceMessageHandler
from synapse.handlers.device import DeviceHandler
from synapse.handlers.e2e_keys import E2eKeysHandler
from synapse.handlers.presence import PresenceHandler
from synapse.handlers.room_list import RoomListHandler
from synapse.handlers.sync import SyncHandler
from synapse.handlers.typing import TypingHandler
from synapse.handlers.events import EventHandler, EventStreamHandler
from synapse.handlers.initial_sync import InitialSyncHandler
from synapse.handlers.receipts import ReceiptsHandler
from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.notifier import Notifier
from synapse.push.pusherpool import PusherPool
from synapse.rest.media.v1.media_repository import MediaRepository
from synapse.state import StateHandler
from synapse.storage import DataStore
from synapse.streams.events import EventSources
from synapse.util import Clock
from synapse.util.distributor import Distributor
logger = logging.getLogger(__name__)
class HomeServer(object):
"""A basic homeserver object without lazy component builders.
This will need all of the components it requires to either be passed as
constructor arguments, or the relevant methods overridden to create them.
Typically this would only be used for unit tests.
For every dependency in the DEPENDENCIES list below, this class creates one
method,
def get_DEPENDENCY(self)
which returns the value of that dependency. If no value has yet been set
nor was provided to the constructor, it will attempt to call a lazy builder
method called
def build_DEPENDENCY(self)
which must be implemented by the subclass. The builder may call any of the
required "get" methods on the instance to obtain the sub-dependencies it
requires.
"""
DEPENDENCIES = [
'config',
'clock',
'http_client',
'db_pool',
'persistence_service',
'replication_layer',
'datastore',
'handlers',
'v1auth',
'auth',
'rest_servlet_factory',
'state_handler',
'presence_handler',
'sync_handler',
'typing_handler',
'room_list_handler',
'auth_handler',
'device_handler',
'e2e_keys_handler',
'event_handler',
'event_stream_handler',
'initial_sync_handler',
'application_service_api',
'application_service_scheduler',
'application_service_handler',
'device_message_handler',
'notifier',
'distributor',
'client_resource',
'resource_for_federation',
'resource_for_static_content',
'resource_for_web_client',
'resource_for_content_repo',
'resource_for_server_key',
'resource_for_server_key_v2',
'resource_for_media_repository',
'resource_for_metrics',
'event_sources',
'ratelimiter',
'keyring',
'pusherpool',
'event_builder_factory',
'filtering',
'http_client_context_factory',
'simple_http_client',
'media_repository',
'federation_transport_client',
'federation_sender',
'receipts_handler',
'macaroon_generator',
]
def __init__(self, hostname, **kwargs):
"""
Args:
hostname : The hostname for the server.
"""
self.hostname = hostname
self._building = {}
self.clock = Clock()
self.distributor = Distributor()
self.ratelimiter = Ratelimiter()
# Other kwargs are explicit dependencies
for depname in kwargs:
setattr(self, depname, kwargs[depname])
def setup(self):
logger.info("Setting up.")
self.datastore = DataStore(self.get_db_conn(), self)
logger.info("Finished setting up.")
def get_ip_from_request(self, request):
# X-Forwarded-For is handled by our custom request type.
return request.getClientIP()
def is_mine(self, domain_specific_string):
return domain_specific_string.domain == self.hostname
def is_mine_id(self, string):
return string.split(":", 1)[1] == self.hostname
def build_replication_layer(self):
return initialize_http_replication(self)
def build_handlers(self):
return Handlers(self)
def build_notifier(self):
return Notifier(self)
def build_auth(self):
return Auth(self)
def build_http_client_context_factory(self):
return (
InsecureInterceptableContextFactory()
if self.config.use_insecure_ssl_client_just_for_testing_do_not_use
else BrowserLikePolicyForHTTPS()
)
def build_simple_http_client(self):
return SimpleHttpClient(self)
def build_v1auth(self):
orf = Auth(self)
# The Matrix spec makes no reference to what HTTP status code is returned,
# but the V1 API uses 403 where it means 401, and the webclient
# relies on this behaviour, so V1 gets its own copy of the auth
# with backwards compat behaviour.
orf.TOKEN_NOT_FOUND_HTTP_STATUS = 403
return orf
def build_state_handler(self):
return StateHandler(self)
def build_presence_handler(self):
return PresenceHandler(self)
def build_typing_handler(self):
return TypingHandler(self)
def build_sync_handler(self):
return SyncHandler(self)
def build_room_list_handler(self):
return RoomListHandler(self)
def build_auth_handler(self):
return AuthHandler(self)
def build_macaroon_generator(self):
return MacaroonGeneartor(self)
def build_device_handler(self):
return DeviceHandler(self)
def build_device_message_handler(self):
return DeviceMessageHandler(self)
def build_e2e_keys_handler(self):
return E2eKeysHandler(self)
def build_application_service_api(self):
return ApplicationServiceApi(self)
def build_application_service_scheduler(self):
return ApplicationServiceScheduler(self)
def build_application_service_handler(self):
return ApplicationServicesHandler(self)
def build_event_handler(self):
return EventHandler(self)
def build_event_stream_handler(self):
return EventStreamHandler(self)
def build_initial_sync_handler(self):
return InitialSyncHandler(self)
def build_event_sources(self):
return EventSources(self)
def build_keyring(self):
return Keyring(self)
def build_event_builder_factory(self):
return EventBuilderFactory(
clock=self.get_clock(),
hostname=self.hostname,
)
def build_filtering(self):
return Filtering(self)
def build_pusherpool(self):
return PusherPool(self)
def build_http_client(self):
return MatrixFederationHttpClient(self)
def build_db_pool(self):
name = self.db_config["name"]
return adbapi.ConnectionPool(
name,
**self.db_config.get("args", {})
)
def build_media_repository(self):
return MediaRepository(self)
def build_federation_transport_client(self):
return TransportLayerClient(self)
def build_federation_sender(self):
if self.should_send_federation():
return TransactionQueue(self)
elif not self.config.worker_app:
return FederationRemoteSendQueue(self)
else:
raise Exception("Workers cannot send federation traffic")
def build_receipts_handler(self):
return ReceiptsHandler(self)
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
def should_send_federation(self):
"Should this server be sending federation traffic directly?"
return self.config.send_federation and (
not self.config.worker_app
or self.config.worker_app == "synapse.app.federation_sender"
)
def _make_dependency_method(depname):
def _get(hs):
try:
return getattr(hs, depname)
except AttributeError:
pass
try:
builder = getattr(hs, "build_%s" % (depname))
except AttributeError:
builder = None
if builder:
# Prevent cyclic dependencies from deadlocking
if depname in hs._building:
raise ValueError("Cyclic dependency while building %s" % (
depname,
))
hs._building[depname] = 1
dep = builder()
setattr(hs, depname, dep)
del hs._building[depname]
return dep
raise NotImplementedError(
"%s has no %s nor a builder for it" % (
type(hs).__name__, depname,
)
)
setattr(HomeServer, "get_%s" % (depname), _get)
# Build magic accessors for every dependency
for depname in HomeServer.DEPENDENCIES:
_make_dependency_method(depname)
| TribeMedia/synapse | synapse/server.py | Python | apache-2.0 | 11,005 | 0.000091 |
#############################################################################
# $HeadURL$
#############################################################################
""" ..mod: FTSRequest
=================
Helper class to perform FTS job submission and monitoring.
"""
# # imports
import sys
import re
import time
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import checkGuid
from DIRAC.Core.Utilities.Adler import compareAdler, intAdlerToHex, hexAdlerToInt
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
from DIRAC.Core.Utilities.Time import dateTime
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
# # RCSID
__RCSID__ = "$Id$"
class FTSRequest( object ):
"""
.. class:: FTSRequest
Helper class for FTS job submission and monitoring.
"""
# # default checksum type
__defaultCksmType = "ADLER32"
# # flag to disable/enable checksum test, default: disabled
__cksmTest = False
def __init__( self ):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
# # final states tuple
self.finalStates = ( 'Canceled', 'Failed', 'Hold',
'Finished', 'FinishedDirty' )
# # failed states tuple
self.failedStates = ( 'Canceled', 'Failed',
'Hold', 'FinishedDirty' )
# # successful states tuple
self.successfulStates = ( 'Finished', 'Done' )
# # all file states tuple
self.fileStates = ( 'Done', 'Active', 'Pending', 'Ready', 'Canceled', 'Failed',
'Finishing', 'Finished', 'Submitted', 'Hold', 'Waiting' )
self.statusSummary = {}
# # request status
self.requestStatus = 'Unknown'
# # dict for FTS job files
self.fileDict = {}
# # dict for replicas information
self.catalogReplicas = {}
# # dict for metadata information
self.catalogMetadata = {}
# # dict for files that failed to register
self.failedRegistrations = {}
# # placeholder for FileCatalog reference
self.oCatalog = None
# # submit timestamp
self.submitTime = ''
# # placeholder FTS job GUID
self.ftsGUID = ''
# # placeholder for FTS server URL
self.ftsServer = ''
# # flag marking FTS job completeness
self.isTerminal = False
# # completeness percentage
self.percentageComplete = 0.0
# # source SE name
self.sourceSE = ''
# # flag marking source SE validity
self.sourceValid = False
# # source space token
self.sourceToken = ''
# # target SE name
self.targetSE = ''
# # flag marking target SE validity
self.targetValid = False
# # target space token
self.targetToken = ''
# # placeholder for target StorageElement
self.oTargetSE = None
# # placeholder for source StorageElement
self.oSourceSE = None
# # checksum type, set it to default
self.__cksmType = self.__defaultCksmType
# # disable checksum test by default
self.__cksmTest = False
# # statuses that prevent submitting to FTS
self.noSubmitStatus = ( 'Failed', 'Done', 'Staging' )
# # were sources resolved?
self.sourceResolved = False
# # Number of file transfers actually submitted
self.submittedFiles = 0
self.transferTime = 0
self.submitCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/SubmitCommand', 'glite-transfer-submit' )
self.monitorCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/MonitorCommand', 'glite-transfer-status' )
self.ftsJob = None
self.ftsFiles = []
####################################################################
#
# Methods for setting/getting/checking the SEs
#
def setSourceSE( self, se ):
""" set SE for source
:param self: self reference
:param str se: source SE name
"""
if se == self.targetSE:
return S_ERROR( "SourceSE is TargetSE" )
self.sourceSE = se
self.oSourceSE = StorageElement( self.sourceSE )
return self.__checkSourceSE()
def __checkSourceSE( self ):
""" check source SE availability
:param self: self reference
"""
if not self.sourceSE:
return S_ERROR( "SourceSE not set" )
res = self.oSourceSE.isValid( 'Read' )
if not res['OK']:
return S_ERROR( "SourceSE not available for reading" )
res = self.__getSESpaceToken( self.oSourceSE )
if not res['OK']:
self.log.error( "FTSRequest failed to get SRM Space Token for SourceSE", res['Message'] )
return S_ERROR( "SourceSE does not support FTS transfers" )
if self.__cksmTest:
res = self.oSourceSE.getChecksumType()
if not res["OK"]:
self.log.error( "Unable to get checksum type for SourceSE",
"%s: %s" % ( self.sourceSE, res["Message"] ) )
cksmType = res["Value"]
if cksmType in ( "NONE", "NULL" ):
self.log.warn( "Checksum type set to %s at SourceSE %s, disabling checksum test" % ( cksmType,
self.sourceSE ) )
self.__cksmTest = False
elif cksmType != self.__cksmType:
self.log.warn( "Checksum type mismatch, disabling checksum test" )
self.__cksmTest = False
self.sourceToken = res['Value']
self.sourceValid = True
return S_OK()
def setTargetSE( self, se ):
""" set target SE
:param self: self reference
:param str se: target SE name
"""
if se == self.sourceSE:
return S_ERROR( "TargetSE is SourceSE" )
self.targetSE = se
self.oTargetSE = StorageElement( self.targetSE )
return self.__checkTargetSE()
def setTargetToken( self, token ):
""" target space token setter
:param self: self reference
:param str token: target space token
"""
self.targetToken = token
return S_OK()
def __checkTargetSE( self ):
""" check target SE availability
:param self: self reference
"""
if not self.targetSE:
return S_ERROR( "TargetSE not set" )
res = self.oTargetSE.isValid( 'Write' )
if not res['OK']:
return S_ERROR( "TargetSE not available for writing" )
res = self.__getSESpaceToken( self.oTargetSE )
if not res['OK']:
self.log.error( "FTSRequest failed to get SRM Space Token for TargetSE", res['Message'] )
return S_ERROR( "TargetSE does not support FTS transfers" )
# # check checksum types
if self.__cksmTest:
res = self.oTargetSE.getChecksumType()
if not res["OK"]:
self.log.error( "Unable to get checksum type for TargetSE",
"%s: %s" % ( self.targetSE, res["Message"] ) )
cksmType = res["Value"]
if cksmType in ( "NONE", "NULL" ):
self.log.warn( "Checksum type set to %s at TargetSE %s, disabling checksum test" % ( cksmType,
self.targetSE ) )
self.__cksmTest = False
elif cksmType != self.__cksmType:
self.log.warn( "Checksum type mismatch, disabling checksum test" )
self.__cksmTest = False
self.targetToken = res['Value']
self.targetValid = True
return S_OK()
@staticmethod
def __getSESpaceToken( oSE ):
""" get space token from StorageElement instance
:param self: self reference
:param StorageElement oSE: StorageElement instance
"""
res = oSE.getStorageParameters( "SRM2" )
if not res['OK']:
return res
return S_OK( res['Value'].get( 'SpaceToken' ) )
####################################################################
#
# Methods for setting/getting FTS request parameters
#
def setFTSGUID( self, guid ):
""" FTS job GUID setter
:param self: self reference
:param str guid: string containg GUID
"""
if not checkGuid( guid ):
return S_ERROR( "Incorrect GUID format" )
self.ftsGUID = guid
return S_OK()
def setFTSServer( self, server ):
""" FTS server setter
:param self: self reference
:param str server: FTS server URL
"""
self.ftsServer = server
return S_OK()
def isRequestTerminal( self ):
""" check if FTS job has terminated
:param self: self reference
"""
if self.requestStatus in self.finalStates:
self.isTerminal = True
return S_OK( self.isTerminal )
def setCksmTest( self, cksmTest = False ):
""" set cksm test
:param self: self reference
:param bool cksmTest: flag to enable/disable checksum test
"""
self.__cksmTest = bool( cksmTest )
return S_OK( self.__cksmTest )
####################################################################
#
# Methods for setting/getting/checking files and their metadata
#
def setLFN( self, lfn ):
""" add LFN :lfn: to :fileDict:
:param self: self reference
:param str lfn: LFN to add to the file dict
"""
self.fileDict.setdefault( lfn, {'Status':'Waiting'} )
return S_OK()
def setSourceSURL( self, lfn, surl ):
""" source SURL setter
:param self: self reference
:param str lfn: LFN
:param str surl: source SURL
"""
target = self.fileDict[lfn].get( 'Target' )
if target == surl:
return S_ERROR( "Source and target the same" )
return self.__setFileParameter( lfn, 'Source', surl )
def getSourceSURL( self, lfn ):
""" get source SURL for LFN :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Source' )
def setTargetSURL( self, lfn, surl ):
""" set target SURL for LFN :lfn:
:param self: self reference
:param str lfn: LFN
:param str surl: target SURL
"""
source = self.fileDict[lfn].get( 'Source' )
if source == surl:
return S_ERROR( "Source and target the same" )
return self.__setFileParameter( lfn, 'Target', surl )
def getFailReason( self, lfn ):
""" get fail reason for file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Reason' )
def getRetries( self, lfn ):
""" get number of attepmts made to transfer file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Retries' )
def getTransferTime( self, lfn ):
""" get duration of transfer for file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Duration' )
def getFailed( self ):
""" get list of wrongly transferred LFNs
:param self: self reference
"""
return S_OK( [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) in self.failedStates ] )
def getStaging( self ):
""" get files set for prestaging """
return S_OK( [lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) == 'Staging'] )
def getDone( self ):
""" get list of succesfully transferred LFNs
:param self: self reference
"""
return S_OK( [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) in self.successfulStates ] )
def __setFileParameter( self, lfn, paramName, paramValue ):
""" set :paramName: to :paramValue: for :lfn: file
:param self: self reference
:param str lfn: LFN
:param str paramName: parameter name
:param mixed paramValue: a new parameter value
"""
self.setLFN( lfn )
self.fileDict[lfn][paramName] = paramValue
return S_OK()
def __getFileParameter( self, lfn, paramName ):
""" get value of :paramName: for file :lfn:
:param self: self reference
:param str lfn: LFN
:param str paramName: parameter name
"""
if lfn not in self.fileDict:
return S_ERROR( "Supplied file not set" )
if paramName not in self.fileDict[lfn]:
return S_ERROR( "%s not set for file" % paramName )
return S_OK( self.fileDict[lfn][paramName] )
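# For reference (inferred from the setters/getters above, not part of the
# original module): after source/target resolution a fileDict entry looks
# roughly like
#
#   self.fileDict[lfn] = { 'Status'   : 'Waiting',
#                          'Source'   : '<source SURL>',
#                          'Target'   : '<target SURL>',
#                          'Reason'   : '<failure reason, if any>',
#                          'Duration' : <transfer duration in seconds> }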
####################################################################
#
# Methods for submission
#
def submit( self, monitor = False, printOutput = True ):
""" submit FTS job
:param self: self reference
:param bool monitor: flag to monitor progress of FTS job
:param bool printOutput: flag to print output of execution to stdout
"""
res = self.__prepareForSubmission()
if not res['OK']:
return res
res = self.__submitFTSTransfer()
if not res['OK']:
return res
resDict = { 'ftsGUID' : self.ftsGUID, 'ftsServer' : self.ftsServer, 'submittedFiles' : self.submittedFiles }
if monitor or printOutput:
gLogger.always( "Submitted %s@%s" % ( self.ftsGUID, self.ftsServer ) )
if monitor:
self.monitor( untilTerminal = True, printOutput = printOutput, full = False )
return S_OK( resDict )
def __prepareForSubmission( self ):
""" check validity of job before submission
:param self: self reference
"""
if not self.fileDict:
return S_ERROR( "No files set" )
if not self.sourceValid:
return S_ERROR( "SourceSE not valid" )
if not self.targetValid:
return S_ERROR( "TargetSE not valid" )
if not self.ftsServer:
res = self.__resolveFTSServer()
if not res['OK']:
return S_ERROR( "FTSServer not valid" )
self.resolveSource()
self.resolveTarget()
res = self.__filesToSubmit()
if not res['OK']:
return S_ERROR( "No files to submit" )
return S_OK()
def __getCatalogObject( self ):
""" CatalogInterface instance facade
:param self: self reference
"""
try:
if not self.oCatalog:
self.oCatalog = FileCatalog()
return S_OK()
except:
return S_ERROR()
def __updateReplicaCache( self, lfns = None, overwrite = False ):
""" update replica cache for list of :lfns:
:param self: self reference
:param mixed lfns: list of LFNs
:param bool overwrite: flag to trigger cache clearing and updating
"""
if not lfns:
lfns = self.fileDict.keys()
toUpdate = [ lfn for lfn in lfns if ( lfn not in self.catalogReplicas ) or overwrite ]
if not toUpdate:
return S_OK()
res = self.__getCatalogObject()
if not res['OK']:
return res
res = self.oCatalog.getReplicas( toUpdate )
if not res['OK']:
return S_ERROR( "Failed to update replica cache: %s" % res['Message'] )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
for lfn, replicas in res['Value']['Successful'].items():
self.catalogReplicas[lfn] = replicas
return S_OK()
def __updateMetadataCache( self, lfns = None ):
""" update metadata cache for list of LFNs
:param self: self reference
:param list lfns: list of LFNs
"""
if not lfns:
lfns = self.fileDict.keys()
toUpdate = [ lfn for lfn in lfns if lfn not in self.catalogMetadata ]
if not toUpdate:
return S_OK()
res = self.__getCatalogObject()
if not res['OK']:
return res
res = self.oCatalog.getFileMetadata( toUpdate )
if not res['OK']:
return S_ERROR( "Failed to get source catalog metadata: %s" % res['Message'] )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
for lfn, metadata in res['Value']['Successful'].items():
self.catalogMetadata[lfn] = metadata
return S_OK()
def resolveSource( self ):
""" resolve source SE eligible for submission
:param self: self reference
"""
# Avoid resolving sources twice
if self.sourceResolved:
return S_OK()
# Only resolve files that need a transfer
toResolve = [ lfn for lfn in self.fileDict if self.fileDict[lfn].get( "Status", "" ) != "Failed" ]
if not toResolve:
return S_OK()
res = self.__updateMetadataCache( toResolve )
if not res['OK']:
return res
res = self.__updateReplicaCache( toResolve )
if not res['OK']:
return res
# Define the source URLs
for lfn in toResolve:
replicas = self.catalogReplicas.get( lfn, {} )
if self.sourceSE not in replicas:
gLogger.warn( "resolveSource: skipping %s - not replicas at SourceSE %s" % ( lfn, self.sourceSE ) )
self.__setFileParameter( lfn, 'Reason', "No replica at SourceSE" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = returnSingleResult( self.oSourceSE.getURL( lfn, protocol = 'srm' ) )
if not res['OK']:
gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = self.setSourceSURL( lfn, res['Value'] )
if not res['OK']:
gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
toResolve = []
for lfn in self.fileDict:
if "Source" in self.fileDict[lfn]:
toResolve.append( lfn )
if not toResolve:
return S_ERROR( "No eligible Source files" )
# Get metadata of the sources, to check for existence, availability and caching
res = self.oSourceSE.getFileMetadata( toResolve )
if not res['OK']:
return S_ERROR( "Failed to check source file metadata" )
for lfn, error in res['Value']['Failed'].items():
if re.search( 'File does not exist', error ):
gLogger.warn( "resolveSource: skipping %s - source file does not exists" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file does not exist" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
gLogger.warn( "resolveSource: skipping %s - failed to get source metadata" % lfn )
self.__setFileParameter( lfn, 'Reason', "Failed to get Source metadata" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
toStage = []
nbStagedFiles = 0
for lfn, metadata in res['Value']['Successful'].items():
lfnStatus = self.fileDict.get( lfn, {} ).get( 'Status' )
if metadata['Unavailable']:
gLogger.warn( "resolveSource: skipping %s - source file unavailable" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file Unavailable" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif metadata['Lost']:
gLogger.warn( "resolveSource: skipping %s - source file lost" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file Lost" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif not metadata['Cached']:
if lfnStatus != 'Staging':
toStage.append( lfn )
elif metadata['Size'] != self.catalogMetadata[lfn]['Size']:
gLogger.warn( "resolveSource: skipping %s - source file size mismatch" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source size mismatch" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif self.catalogMetadata[lfn]['Checksum'] and metadata['Checksum'] and \
not compareAdler( metadata['Checksum'], self.catalogMetadata[lfn]['Checksum'] ):
gLogger.warn( "resolveSource: skipping %s - source file checksum mismatch" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source checksum mismatch" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif lfnStatus == 'Staging':
# file that was staging is now cached
self.__setFileParameter( lfn, 'Status', 'Waiting' )
nbStagedFiles += 1
# Some files were being staged
if nbStagedFiles:
self.log.info( 'resolveSource: %d files have been staged' % nbStagedFiles )
# Launching staging of files not in cache
if toStage:
gLogger.warn( "resolveSource: %s source files not cached, prestaging..." % len( toStage ) )
stage = self.oSourceSE.prestageFile( toStage )
if not stage["OK"]:
gLogger.error( "resolveSource: error is prestaging", stage["Message"] )
for lfn in toStage:
self.__setFileParameter( lfn, 'Reason', stage["Message"] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
for lfn in toStage:
if lfn in stage['Value']['Successful']:
self.__setFileParameter( lfn, 'Status', 'Staging' )
elif lfn in stage['Value']['Failed']:
self.__setFileParameter( lfn, 'Reason', stage['Value']['Failed'][lfn] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
self.sourceResolved = True
return S_OK()
def resolveTarget( self ):
""" find target SE eligible for submission
:param self: self reference
"""
toResolve = [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status' ) not in self.noSubmitStatus ]
if not toResolve:
return S_OK()
res = self.__updateReplicaCache( toResolve )
if not res['OK']:
return res
for lfn in toResolve:
res = returnSingleResult( self.oTargetSE.getURL( lfn, protocol = 'srm' ) )
if not res['OK']:
reason = res.get( 'Message', res['Message'] )
gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, reason ) )
self.__setFileParameter( lfn, 'Reason', reason )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = self.setTargetSURL( lfn, res['Value'] )
if not res['OK']:
gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
toResolve = []
for lfn in self.fileDict:
if "Target" in self.fileDict[lfn]:
toResolve.append( lfn )
if not toResolve:
return S_ERROR( "No eligible Target files" )
res = self.oTargetSE.exists( toResolve )
if not res['OK']:
return S_ERROR( "Failed to check target existence" )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
toRemove = []
for lfn, exists in res['Value']['Successful'].items():
if exists:
res = self.getSourceSURL( lfn )
if not res['OK']:
gLogger.warn( "resolveTarget: skipping %s - target exists" % lfn )
self.__setFileParameter( lfn, 'Reason', "Target exists" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif res['Value'] == self.fileDict[lfn]['Target']:
gLogger.warn( "resolveTarget: skipping %s - source and target pfns are the same" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source and Target the same" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
toRemove.append( lfn )
if toRemove:
self.oTargetSE.removeFile( toRemove )
return S_OK()
def __filesToSubmit( self ):
"""
check if there is at least one file to submit
:return: S_OK if at least one file is present, S_ERROR otherwise
"""
for lfn in self.fileDict:
lfnStatus = self.fileDict[lfn].get( 'Status' )
source = self.fileDict[lfn].get( 'Source' )
target = self.fileDict[lfn].get( 'Target' )
if lfnStatus not in self.noSubmitStatus and source and target:
return S_OK()
return S_ERROR()
def __createFTSFiles( self ):
""" create LFNs file for glite-transfer-submit command
This file consists of one line for each file to be transferred:
sourceSURL targetSURL [CHECKSUMTYPE:CHECKSUM]
:param self: self reference
"""
self.__updateMetadataCache()
for lfn in self.fileDict:
lfnStatus = self.fileDict[lfn].get( 'Status' )
if lfnStatus not in self.noSubmitStatus:
cksmStr = ""
# # add cksmType:cksm only if cksmType is specified, else let FTS decide by itself
if self.__cksmTest and self.__cksmType:
checkSum = self.catalogMetadata.get( lfn, {} ).get( 'Checksum' )
if checkSum:
cksmStr = " %s:%s" % ( self.__cksmType, intAdlerToHex( hexAdlerToInt( checkSum ) ) )
ftsFile = FTSFile()
ftsFile.LFN = lfn
ftsFile.SourceSURL = self.fileDict[lfn].get( 'Source' )
ftsFile.TargetSURL = self.fileDict[lfn].get( 'Target' )
ftsFile.SourceSE = self.sourceSE
ftsFile.TargetSE = self.targetSE
ftsFile.Status = self.fileDict[lfn].get( 'Status' )
ftsFile.Checksum = cksmStr
ftsFile.Size = self.catalogMetadata.get( lfn, {} ).get( 'Size' )
self.ftsFiles.append( ftsFile )
self.submittedFiles += 1
return S_OK()
def __createFTSJob( self, guid = None ):
self.__createFTSFiles()
ftsJob = FTSJob()
ftsJob.RequestID = 0
ftsJob.OperationID = 0
ftsJob.SourceSE = self.sourceSE
ftsJob.TargetSE = self.targetSE
ftsJob.SourceToken = self.sourceToken
ftsJob.TargetToken = self.targetToken
ftsJob.FTSServer = self.ftsServer
if guid:
ftsJob.FTSGUID = guid
for ftsFile in self.ftsFiles:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile( ftsFile )
self.ftsJob = ftsJob
def __submitFTSTransfer( self ):
""" create and execute glite-transfer-submit CLI command
:param self: self reference
"""
log = gLogger.getSubLogger( 'Submit' )
self.__createFTSJob()
submit = self.ftsJob.submitFTS2( command = self.submitCommand )
if not submit["OK"]:
log.error( "unable to submit FTSJob: %s" % submit["Message"] )
return submit
log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )
# # update statuses for job files
for ftsFile in self.ftsJob:
ftsFile.FTSGUID = self.ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )
self.ftsGUID = self.ftsJob.FTSGUID
return S_OK()
def __resolveFTSServer( self ):
"""
resolve the FTS server to use; it should be the one closest to the target SE
:param self: self reference
"""
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTSServersForSites
if not self.targetSE:
return S_ERROR( "Target SE not set" )
res = getSitesForSE( self.targetSE )
if not res['OK'] or not res['Value']:
return S_ERROR( "Could not determine target site" )
targetSites = res['Value']
targetSite = ''
for targetSite in targetSites:
targetFTS = getFTSServersForSites( [targetSite] )
if targetFTS['OK']:
ftsTarget = targetFTS['Value'][targetSite]
if ftsTarget:
self.ftsServer = ftsTarget
return S_OK( self.ftsServer )
else:
return targetFTS
return S_ERROR( 'No FTS server found for %s' % targetSite )
####################################################################
#
# Methods for monitoring
#
def summary( self, untilTerminal = False, printOutput = False ):
""" summary of FTS job
:param self: self reference
:param bool untilTerminal: flag to monitor FTS job to its final state
:param bool printOutput: flag to print out monitoring information to the stdout
"""
res = self.__isSummaryValid()
if not res['OK']:
return res
while not self.isTerminal:
res = self.__parseOutput( full = True )
if not res['OK']:
return res
if untilTerminal:
self.__print()
self.isRequestTerminal()
if res['Value'] or ( not untilTerminal ):
break
time.sleep( 1 )
if untilTerminal:
print ""
if printOutput and ( not untilTerminal ):
return self.dumpSummary( printOutput = printOutput )
return S_OK()
def monitor( self, untilTerminal = False, printOutput = False, full = True ):
""" monitor FTS job
:param self: self reference
:param bool untilTerminal: flag to monitor FTS job to its final state
:param bool printOutput: flag to print out monitoring information to the stdout
"""
if not self.ftsJob:
self.resolveSource()
self.__createFTSJob( self.ftsGUID )
res = self.__isSummaryValid()
if not res['OK']:
return res
if untilTerminal:
res = self.summary( untilTerminal = untilTerminal, printOutput = printOutput )
if not res['OK']:
return res
res = self.__parseOutput( full = full )
if not res['OK']:
return res
if untilTerminal:
self.finalize()
if printOutput:
self.dump()
return res
def dumpSummary( self, printOutput = False ):
""" get FTS job summary as str
:param self: self reference
:param bool printOutput: print summary to stdout
"""
outStr = ''
for status in sorted( self.statusSummary ):
if self.statusSummary[status]:
outStr = '%s\t%-10s : %-10s\n' % ( outStr, status, str( self.statusSummary[status] ) )
outStr = outStr.rstrip( '\n' )
if printOutput:
print outStr
return S_OK( outStr )
def __print( self ):
""" print progress bar of FTS job completeness to stdout
:param self: self reference
"""
width = 100
bits = int( ( width * self.percentageComplete ) / 100 )
outStr = "|%s>%s| %.1f%s %s %s" % ( "="*bits, " "*( width - bits ),
self.percentageComplete, "%",
self.requestStatus, " "*10 )
sys.stdout.write( "%s\r" % ( outStr ) )
sys.stdout.flush()
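# Example of a rendered progress line (illustrative, bar truncated here;
# the real bar is 100 characters wide):
#
#   |=====================>      | 37.5% Active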
def dump( self ):
""" print FTS job parameters and files to stdout
:param self: self reference
"""
print "%-10s : %-10s" % ( "Status", self.requestStatus )
print "%-10s : %-10s" % ( "Source", self.sourceSE )
print "%-10s : %-10s" % ( "Target", self.targetSE )
print "%-10s : %-128s" % ( "Server", self.ftsServer )
print "%-10s : %-128s" % ( "GUID", self.ftsGUID )
for lfn in sorted( self.fileDict ):
print "\n %-15s : %-128s" % ( 'LFN', lfn )
for key in ['Source', 'Target', 'Status', 'Reason', 'Duration']:
print " %-15s : %-128s" % ( key, str( self.fileDict[lfn].get( key ) ) )
return S_OK()
def __isSummaryValid( self ):
""" check validity of FTS job summary report
:param self: self reference
"""
if not self.ftsServer:
return S_ERROR( "FTSServer not set" )
if not self.ftsGUID:
return S_ERROR( "FTSGUID not set" )
return S_OK()
def __parseOutput( self, full = False ):
""" execute glite-transfer-status command and parse its output
:param self: self reference
:param bool full: glite-transfer-status verbosity level; when set, collect per-file information as well
"""
monitor = self.ftsJob.monitorFTS2( command = self.monitorCommand, full = full )
if not monitor['OK']:
return monitor
self.percentageComplete = self.ftsJob.Completeness
self.requestStatus = self.ftsJob.Status
self.submitTime = self.ftsJob.SubmitTime
statusSummary = monitor['Value']
if statusSummary:
for state in statusSummary:
self.statusSummary[state] = statusSummary[state]
self.transferTime = 0
for ftsFile in self.ftsJob:
lfn = ftsFile.LFN
self.__setFileParameter( lfn, 'Status', ftsFile.Status )
self.__setFileParameter( lfn, 'Reason', ftsFile.Error )
self.__setFileParameter( lfn, 'Duration', ftsFile._duration )
targetURL = self.__getFileParameter( lfn, 'Target' )
if not targetURL['OK']:
self.__setFileParameter( lfn, 'Target', ftsFile.TargetSURL )
self.transferTime += int( ftsFile._duration )
return S_OK()
####################################################################
#
# Methods for finalization
#
def finalize( self ):
""" finalize FTS job
:param self: self reference
"""
self.__updateMetadataCache()
transEndTime = dateTime()
regStartTime = time.time()
res = self.getTransferStatistics()
transDict = res['Value']
res = self.__registerSuccessful( transDict['transLFNs'] )
regSuc, regTotal = res['Value']
regTime = time.time() - regStartTime
if self.sourceSE and self.targetSE:
self.__sendAccounting( regSuc, regTotal, regTime, transEndTime, transDict )
return S_OK()
def getTransferStatistics( self ):
""" collect information of Transfers that can be used by Accounting
:param self: self reference
"""
transDict = { 'transTotal': len( self.fileDict ),
'transLFNs': [],
'transOK': 0,
'transSize': 0 }
for lfn in self.fileDict:
if self.fileDict[lfn].get( 'Status' ) in self.successfulStates:
if self.fileDict[lfn].get( 'Duration', 0 ):
transDict['transLFNs'].append( lfn )
transDict['transOK'] += 1
if lfn in self.catalogMetadata:
transDict['transSize'] += self.catalogMetadata[lfn].get( 'Size', 0 )
return S_OK( transDict )
def getFailedRegistrations( self ):
""" get failed registrations dict
:param self: self reference
"""
return S_OK( self.failedRegistrations )
def __registerSuccessful( self, transLFNs ):
""" register successfully transferred files to the catalogs,
fill failedRegistrations dict for files that failed to register
:param self: self reference
:param list transLFNs: LFNs in FTS job
"""
self.failedRegistrations = {}
toRegister = {}
for lfn in transLFNs:
res = returnSingleResult( self.oTargetSE.getURL( self.fileDict[lfn].get( 'Target' ), protocol = 'srm' ) )
if not res['OK']:
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
toRegister[lfn] = { 'PFN' : res['Value'], 'SE' : self.targetSE }
if not toRegister:
return S_OK( ( 0, 0 ) )
res = self.__getCatalogObject()
if not res['OK']:
self.failedRegistrations = toRegister
self.log.error( 'Failed to get Catalog Object', res['Message'] )
return S_OK( ( 0, len( toRegister ) ) )
res = self.oCatalog.addReplica( toRegister )
if not res['OK']:
self.failedRegistrations = toRegister
self.log.error( 'Failed to add replicas to the catalog', res['Message'] )
return S_OK( ( 0, len( toRegister ) ) )
for lfn, error in res['Value']['Failed'].items():
self.failedRegistrations[lfn] = toRegister[lfn]
self.log.error( 'Registration of Replica failed', '%s : %s' % ( lfn, str( error ) ) )
return S_OK( ( len( res['Value']['Successful'] ), len( toRegister ) ) )
def __sendAccounting( self, regSuc, regTotal, regTime, transEndTime, transDict ):
""" send accounting record
:param self: self reference
:param regSuc: number of files successfully registered
:param regTotal: number of files attempted to register
:param regTime: time spent on registration
:param transEndTime: time stamp at the end of the FTS job
:param dict transDict: dict holding counters for files being transferred, their sizes and successful transfers
"""
oAccounting = DataOperation()
oAccounting.setEndTime( transEndTime )
oAccounting.setStartTime( self.submitTime )
accountingDict = {}
accountingDict['OperationType'] = 'replicateAndRegister'
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'FTS' if 'fts3' not in self.ftsServer else 'FTS3'
accountingDict['RegistrationTime'] = regTime
accountingDict['RegistrationOK'] = regSuc
accountingDict['RegistrationTotal'] = regTotal
accountingDict['TransferOK'] = transDict['transOK']
accountingDict['TransferTotal'] = transDict['transTotal']
accountingDict['TransferSize'] = transDict['transSize']
accountingDict['FinalStatus'] = self.requestStatus
accountingDict['Source'] = self.sourceSE
accountingDict['Destination'] = self.targetSE
accountingDict['TransferTime'] = self.transferTime
oAccounting.setValuesFromDict( accountingDict )
self.log.verbose( "Attempting to commit accounting message..." )
oAccounting.commit()
self.log.verbose( "...committed." )
return S_OK()
| miloszz/DIRAC | DataManagementSystem/Client/FTSRequest.py | Python | gpl-3.0 | 37,261 | 0.02818 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import logging
import re
import shlex
import sys
import time
import os
from webkitpy.common.system import path
from webkitpy.common.system.profiler import ProfilerFactory
_log = logging.getLogger(__name__)
DRIVER_START_TIMEOUT_SECS = 30
class DriverInput(object):
def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args):
self.test_name = test_name
self.timeout = timeout # in ms
self.image_hash = image_hash
self.should_run_pixel_test = should_run_pixel_test
self.args = args
class DriverOutput(object):
"""Groups information about a output from driver for easy passing
and post-processing of data."""
def __init__(self, text, image, image_hash, audio, crash=False,
test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
crashed_pid=None, crash_log=None, leak=False, leak_log=None, pid=None):
# FIXME: Args could be renamed to better clarify what they do.
self.text = text
self.image = image # May be empty-string if the test crashes.
self.image_hash = image_hash
self.image_diff = None # image_diff gets filled in after construction.
self.audio = audio # Binary format is port-dependent.
self.crash = crash
self.crashed_process_name = crashed_process_name
self.crashed_pid = crashed_pid
self.crash_log = crash_log
self.leak = leak
self.leak_log = leak_log
self.test_time = test_time
self.measurements = measurements
self.timeout = timeout
self.error = error # stderr output
self.pid = pid
def has_stderr(self):
return bool(self.error)
class DeviceFailure(Exception):
pass
class Driver(object):
"""object for running test(s) using content_shell or other driver."""
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
"""Initialize a Driver to subsequently run tests.
Typically this routine will spawn content_shell in a config
ready for subsequent input.
port - reference back to the port object.
worker_number - identifier for a particular worker/driver instance
"""
self._port = port
self._worker_number = worker_number
self._no_timeout = no_timeout
self._driver_tempdir = None
# content_shell can report back subprocess crashes by printing
# "#CRASHED - PROCESSNAME". Since those can happen at any time
# and ServerProcess won't be aware of them (since the actual tool
# didn't crash, just a subprocess) we record the crashed subprocess name here.
self._crashed_process_name = None
self._crashed_pid = None
# content_shell can report back subprocesses that became unresponsive
# This could mean they crashed.
self._subprocess_was_unresponsive = False
# content_shell can report back subprocess DOM-object leaks by printing
# "#LEAK". This leak detection is enabled only when the flag
# --enable-leak-detection is passed to content_shell.
self._leaked = False
# stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
# stderr output, as well as if we've seen #EOF on this driver instance.
# FIXME: We should probably remove _read_first_block and _read_optional_image_block and
# instead scope these locally in run_test.
self.error_from_test = str()
self.err_seen_eof = False
self._server_process = None
self._current_cmd_line = None
self._measurements = {}
if self._port.get_option("profile"):
profiler_name = self._port.get_option("profiler")
self._profiler = ProfilerFactory.create_profiler(self._port.host,
self._port._path_to_driver(), self._port.results_directory(), profiler_name)
else:
self._profiler = None
def __del__(self):
self.stop()
def run_test(self, driver_input, stop_when_done):
"""Run a single test and return the results.
Note that it is okay if a test times out or crashes and leaves
the driver in an indeterminate state. The upper layers of the program
are responsible for cleaning up and ensuring things are okay.
Returns a DriverOutput object.
"""
start_time = time.time()
stdin_deadline = start_time + int(driver_input.timeout) / 2000.0
self.start(driver_input.should_run_pixel_test, driver_input.args, stdin_deadline)
test_begin_time = time.time()
self.error_from_test = str()
self.err_seen_eof = False
command = self._command_from_driver_input(driver_input)
deadline = test_begin_time + int(driver_input.timeout) / 1000.0
self._server_process.write(command)
text, audio = self._read_first_block(deadline) # First block is either text or audio
image, actual_image_hash = self._read_optional_image_block(deadline) # The second (optional) block is image data.
crashed = self.has_crashed()
timed_out = self._server_process.timed_out
pid = self._server_process.pid()
leaked = self._leaked
if not crashed:
sanitizer = self._port.output_contains_sanitizer_messages(self.error_from_test)
if sanitizer:
self.error_from_test = 'OUTPUT CONTAINS "' + sanitizer + '", so we are treating this test as if it crashed, even though it did not.\n\n' + self.error_from_test
crashed = True
self._crashed_process_name = "unknown process name"
self._crashed_pid = 0
if stop_when_done or crashed or timed_out or leaked:
# We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
# In the timeout case, we kill the hung process as well.
out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
if out:
text += out
if err:
self.error_from_test += err
self._server_process = None
crash_log = None
if crashed:
self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
if not crash_log:
pid_str = str(self._crashed_pid) if self._crashed_pid else "unknown pid"
crash_log = 'No crash log found for %s:%s.\n' % (self._crashed_process_name, pid_str)
# If we were unresponsive append a message informing there may not have been a crash.
if self._subprocess_was_unresponsive:
crash_log += 'Process failed to become responsive before timing out.\n'
# Print stdout and stderr to the placeholder crash log; we want as much context as possible.
if self.error_from_test:
crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (text, self.error_from_test)
return DriverOutput(text, image, actual_image_hash, audio,
crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
timeout=timed_out, error=self.error_from_test,
crashed_process_name=self._crashed_process_name,
crashed_pid=self._crashed_pid, crash_log=crash_log,
leak=leaked, leak_log=self._leak_log,
pid=pid)
def _get_crash_log(self, stdout, stderr, newer_than):
return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
# FIXME: Seems this could just be inlined into callers.
@classmethod
def _command_wrapper(cls, wrapper_option):
# Hook for injecting valgrind or other runtime instrumentation,
# used by e.g. tools/valgrind/valgrind_tests.py.
return shlex.split(wrapper_option) if wrapper_option else []
HTTP_DIR = "http/tests/"
HTTP_LOCAL_DIR = "http/tests/local/"
def is_http_test(self, test_name):
return test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)
def test_to_uri(self, test_name):
"""Convert a test name to a URI.
Tests which have an 'https' directory in their paths (e.g.
'/http/tests/security/mixedContent/https/test1.html') or '.https.' in
their name (e.g. 'http/tests/security/mixedContent/test1.https.html') will
be loaded over HTTPS; all other tests over HTTP.
"""
if not self.is_http_test(test_name):
return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name))
relative_path = test_name[len(self.HTTP_DIR):]
if "/https/" in test_name or ".https." in test_name:
return "https://127.0.0.1:8443/" + relative_path
return "http://127.0.0.1:8000/" + relative_path
def uri_to_test(self, uri):
"""Return the base layout test name for a given URI.
This returns the test name for a given URI, e.g., if you passed in
"file:///src/LayoutTests/fast/html/keygen.html" it would return
"fast/html/keygen.html".
"""
if uri.startswith("file:///"):
prefix = path.abspath_to_uri(self._port.host.platform, self._port.layout_tests_dir())
if not prefix.endswith('/'):
prefix += '/'
return uri[len(prefix):]
if uri.startswith("http://"):
return uri.replace('http://127.0.0.1:8000/', self.HTTP_DIR)
if uri.startswith("https://"):
return uri.replace('https://127.0.0.1:8443/', self.HTTP_DIR)
raise NotImplementedError('unknown url type: %s' % uri)
def has_crashed(self):
if self._server_process is None:
return False
if self._crashed_process_name:
return True
if self._server_process.has_crashed():
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
return True
return False
def start(self, pixel_tests, per_test_args, deadline):
new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
if not self._server_process or new_cmd_line != self._current_cmd_line:
self._start(pixel_tests, per_test_args)
self._run_post_start_tasks()
def _setup_environ_for_driver(self, environment):
if self._profiler:
environment = self._profiler.adjusted_environment(environment)
return environment
def _start(self, pixel_tests, per_test_args, wait_for_ready=True):
self.stop()
self._driver_tempdir = self._port._filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
server_name = self._port.driver_name()
environment = self._port.setup_environ_for_server(server_name)
environment = self._setup_environ_for_driver(environment)
self._crashed_process_name = None
self._crashed_pid = None
self._leaked = False
self._leak_log = None
cmd_line = self.cmd_line(pixel_tests, per_test_args)
self._server_process = self._port._server_process_constructor(self._port, server_name, cmd_line, environment, logging=self._port.get_option("driver_logging"))
self._server_process.start()
self._current_cmd_line = cmd_line
if wait_for_ready:
deadline = time.time() + DRIVER_START_TIMEOUT_SECS
if not self._wait_for_server_process_output(self._server_process, deadline, '#READY'):
_log.error("content_shell took too long to startup.")
def _wait_for_server_process_output(self, server_process, deadline, text):
output = ''
line = server_process.read_stdout_line(deadline)
while not server_process.timed_out and not server_process.has_crashed() and not text in line.rstrip():
output += line
line = server_process.read_stdout_line(deadline)
if server_process.timed_out or server_process.has_crashed():
_log.error('Failed to start the %s process: \n%s' % (server_process.name(), output))
return False
return True
def _run_post_start_tasks(self):
# Remote drivers may override this to delay post-start tasks until the server has ack'd.
if self._profiler:
self._profiler.attach_to_pid(self._pid_on_target())
def _pid_on_target(self):
# Remote drivers will override this method to return the pid on the device.
return self._server_process.pid()
def stop(self, timeout_secs=0.0):
if self._server_process:
self._server_process.stop(timeout_secs)
self._server_process = None
if self._profiler:
self._profiler.profile_after_exit()
if self._driver_tempdir:
self._port._filesystem.rmtree(str(self._driver_tempdir))
self._driver_tempdir = None
self._current_cmd_line = None
def cmd_line(self, pixel_tests, per_test_args):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
if self._no_timeout:
cmd.append('--no-timeout')
cmd.extend(self._port.get_option('additional_driver_flag', []))
cmd.extend(self._port.additional_driver_flag())
if self._port.get_option('enable_leak_detection'):
cmd.append('--enable-leak-detection')
cmd.extend(per_test_args)
cmd.append('-')
return cmd
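# A typical resulting command line (sketch; the driver path and optional
# flags depend on the port configuration and options passed in):
#   ['/path/to/content_shell', '--no-timeout', '--enable-leak-detection', '-']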
def _check_for_driver_crash(self, error_line):
if error_line == "#CRASHED\n":
# This is used on Windows to report that the process has crashed
# See http://trac.webkit.org/changeset/65537.
self._crashed_process_name = self._server_process.name()
self._crashed_pid = self._server_process.pid()
elif (error_line.startswith("#CRASHED - ")
or error_line.startswith("#PROCESS UNRESPONSIVE - ")):
# WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
match = re.match('#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line)
self._crashed_process_name = match.group(1) if match else 'WebProcess'
match = re.search('pid (\d+)', error_line)
pid = int(match.group(1)) if match else None
self._crashed_pid = pid
# FIXME: delete this after we're sure this code is working :)
_log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
if error_line.startswith("#PROCESS UNRESPONSIVE - "):
self._subprocess_was_unresponsive = True
self._port.sample_process(self._crashed_process_name, self._crashed_pid)
# We want to show this since it's not a regular crash and probably we don't have a crash log.
self.error_from_test += error_line
return True
return self.has_crashed()
def _check_for_leak(self, error_line):
if error_line.startswith("#LEAK - "):
self._leaked = True
match = re.match('#LEAK - (\S+) pid (\d+) (.+)\n', error_line)
self._leak_log = match.group(3)
return self._leaked
def _command_from_driver_input(self, driver_input):
# FIXME: performance tests pass in full URLs instead of test names.
if driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == ('about:blank'):
command = driver_input.test_name
elif self.is_http_test(driver_input.test_name):
command = self.test_to_uri(driver_input.test_name)
else:
command = self._port.abspath_for_test(driver_input.test_name)
if sys.platform == 'cygwin':
command = path.cygpath(command)
assert not driver_input.image_hash or driver_input.should_run_pixel_test
# ' is the separator between arguments.
if self._port.supports_per_test_timeout():
command += "'--timeout'%s" % driver_input.timeout
if driver_input.should_run_pixel_test:
command += "'--pixel-test"
if driver_input.image_hash:
command += "'" + driver_input.image_hash
return command + "\n"
def _read_first_block(self, deadline):
# returns (text_content, audio_content)
block = self._read_block(deadline)
if block.malloc:
self._measurements['Malloc'] = float(block.malloc)
if block.js_heap:
self._measurements['JSHeap'] = float(block.js_heap)
if block.content_type == 'audio/wav':
return (None, block.decoded_content)
return (block.decoded_content, None)
def _read_optional_image_block(self, deadline):
# returns (image, actual_image_hash)
block = self._read_block(deadline, wait_for_stderr_eof=True)
if block.content and block.content_type == 'image/png':
return (block.decoded_content, block.content_hash)
return (None, block.content_hash)
def _read_header(self, block, line, header_text, header_attr, header_filter=None):
if line.startswith(header_text) and getattr(block, header_attr) is None:
value = line.split()[1]
if header_filter:
value = header_filter(value)
setattr(block, header_attr, value)
return True
return False
def _process_stdout_line(self, block, line):
if (self._read_header(block, line, 'Content-Type: ', 'content_type')
or self._read_header(block, line, 'Content-Transfer-Encoding: ', 'encoding')
or self._read_header(block, line, 'Content-Length: ', '_content_length', int)
or self._read_header(block, line, 'ActualHash: ', 'content_hash')
or self._read_header(block, line, 'DumpMalloc: ', 'malloc')
or self._read_header(block, line, 'DumpJSHeap: ', 'js_heap')
or self._read_header(block, line, 'StdinPath', 'stdin_path')):
return
# Note, we're not reading ExpectedHash: here, but we could.
# If the line wasn't a header, we just append it to the content.
block.content += line
def _strip_eof(self, line):
if line and line.endswith("#EOF\n"):
return line[:-5], True
if line and line.endswith("#EOF\r\n"):
_log.error("Got a CRLF-terminated #EOF - this is a driver bug.")
return line[:-6], True
return line, False
def _read_block(self, deadline, wait_for_stderr_eof=False):
block = ContentBlock()
out_seen_eof = False
while not self.has_crashed():
if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
break
if self.err_seen_eof:
out_line = self._server_process.read_stdout_line(deadline)
err_line = None
elif out_seen_eof:
out_line = None
err_line = self._server_process.read_stderr_line(deadline)
else:
out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
if self._server_process.timed_out or self.has_crashed():
break
if out_line:
assert not out_seen_eof
out_line, out_seen_eof = self._strip_eof(out_line)
if err_line:
assert not self.err_seen_eof
err_line, self.err_seen_eof = self._strip_eof(err_line)
if out_line:
if out_line[-1] != "\n":
_log.error("Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug.")
content_length_before_header_check = block._content_length
self._process_stdout_line(block, out_line)
# FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
# Don't wait until we're done with headers, just read the binary blob right now.
if content_length_before_header_check != block._content_length:
if block._content_length > 0:
block.content = self._server_process.read_stdout(deadline, block._content_length)
else:
_log.error("Received content of type %s with Content-Length of 0! This indicates a bug in %s.",
block.content_type, self._server_process.name())
if err_line:
if self._check_for_driver_crash(err_line):
break
if self._check_for_leak(err_line):
break
self.error_from_test += err_line
block.decode_content()
return block
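# A rough sketch of the stdout stream that _read_block()/_process_stdout_line()
# parse for one test (contents are hypothetical):
#   Content-Type: text/plain
#   ...one or more lines of test output...
#   #EOF
# An image block additionally carries Content-Transfer-Encoding, Content-Length and
# ActualHash headers, with the image bytes (possibly base64-encoded, per
# Content-Transfer-Encoding) read immediately after the Content-Length header, as
# noted in the FIXME above.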
class ContentBlock(object):
def __init__(self):
self.content_type = None
self.encoding = None
self.content_hash = None
self._content_length = None
# Content is treated as binary data even though the text output is usually UTF-8.
self.content = str() # FIXME: Should be bytearray() once we require Python 2.6.
self.decoded_content = None
self.malloc = None
self.js_heap = None
self.stdin_path = None
def decode_content(self):
if self.encoding == 'base64' and self.content is not None:
self.decoded_content = base64.b64decode(self.content)
else:
self.decoded_content = self.content
| XiaosongWei/blink-crosswalk | Tools/Scripts/webkitpy/layout_tests/port/driver.py | Python | bsd-3-clause | 23,508 | 0.003148 |
#!/usr/bin/env python
# encoding: utf-8
'''
Created on September 24, 2019
@author: David Moss
'''
import time
# Time conversions to ms
ONE_SECOND_MS = 1000
ONE_MINUTE_MS = 60 * ONE_SECOND_MS
ONE_HOUR_MS = ONE_MINUTE_MS * 60
ONE_DAY_MS = ONE_HOUR_MS * 24
ONE_WEEK_MS = ONE_DAY_MS * 7
ONE_MONTH_MS = ONE_DAY_MS * 30
ONE_YEAR_MS = ONE_DAY_MS * 365
# Timestamped commands
COMMAND_DELETE = -2
COMMAND_SET_STATUS_HIDDEN = -1
COMMAND_SET_STATUS_GOOD = 0
COMMAND_SET_STATUS_WARNING = 1
COMMAND_SET_STATUS_CRITICAL = 2
# Data Stream Address
DATASTREAM_ADDRESS = "update_dashboard_content"
# # Data Stream Content
# DATASTREAM_CONTENT = {
# "type": 0,
# "title": "NOW",
# "weight": 0,
# "content": {
# "status": 0,
# "comment": "Left the house once today.",
# "weight": 25,
# "id": "leave",
# "icon": "house-leave",
# "icon_font": "far",
# "alarms": {
# int(time.time() * 1000) + (ONE_SECOND_MS * 600): COMMAND_DELETE,
# }
# }
# }
# # Data Stream Content
# DATASTREAM_CONTENT = {
# "type": 0,
# "title": "NOW",
# "weight": 0,
# "content": {
# "status": 0,
# "comment": "81% sleep score.",
# "weight": 20,
# "id": "sleep",
# "icon": "snooze",
# "icon_font": "far",
# "alarms": {
# int(time.time() * 1000) + (ONE_SECOND_MS * 600): COMMAND_DELETE,
# }
# }
# }
# Data Stream Content
DATASTREAM_CONTENT = {
"type": 0,
"title": "NOW",
"weight": 0,
"content": {
"status": 0,
"comment": "Judy Bessee reached out today.",
"weight": 30,
"id": "temporary",
"icon": "comment-smile",
"icon_font": "far",
"alarms": {
int(time.time() * 1000) + (ONE_SECOND_MS * 600): COMMAND_DELETE,
}
}
}
# input function behaves differently in Python 2.x and 3.x. And there is no raw_input in 3.x.
if hasattr(__builtins__, 'raw_input'):
input=raw_input
import requests
import sys
import json
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-u", "--username", dest="username", help="Username")
parser.add_argument("-p", "--password", dest="password", help="Password")
parser.add_argument("-s", "--server", dest="server", help="Base server URL (app.presencepro.com)")
parser.add_argument("-l", "--location", dest="location_id", help="Location ID")
parser.add_argument("-a", "--api_key", dest="apikey", help="User's API key instead of a username/password")
parser.add_argument("--httpdebug", dest="httpdebug", action="store_true", help="HTTP debug logger output");
# Process arguments
args, unknown = parser.parse_known_args()
# Extract the arguments
username = args.username
password = args.password
server = args.server
httpdebug = args.httpdebug
app_key = args.apikey
location_id = args.location_id
if location_id is not None:
location_id = int(location_id)
print(Color.BOLD + "Location ID: {}".format(location_id) + Color.END)
# Define the bot server
if not server:
server = "https://app.presencepro.com"
if "http" not in server:
server = "https://" + server
# HTTP Debugging
if httpdebug:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Login to your user account
if app_key is None:
app_key, user_info = _login(server, username, password)
send_datastream_message(server, app_key, location_id, DATASTREAM_ADDRESS, DATASTREAM_CONTENT)
print("Done!")
def send_datastream_message(server, app_key, location_id, address, content):
http_headers = {"API_KEY": app_key, "Content-Type": "application/json"}
params = {
"address": address,
"scope": 1,
"locationId": location_id
}
body = {
"feed": content
}
print("Body: " + json.dumps(body, indent=2, sort_keys=True))
print("Server: " + server)
r = requests.post(server + "/cloud/appstore/stream/", params=params, data=json.dumps(body), headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
print(str(r.text))
def _login(server, username, password):
"""Get an Bot API key and User Info by login with a username and password"""
if not username:
        username = input('Email address: ')
if not password:
import getpass
password = getpass.getpass('Password: ')
try:
import requests
# login by username and password
http_headers = {"PASSWORD": password, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/login", params={"username":username}, headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
app_key = j['key']
# get user info
http_headers = {"API_KEY": app_key, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/user", headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
return app_key, j
except BotError as e:
sys.stderr.write("Error: " + e.msg)
sys.stderr.write("\nCreate an account on " + server + " and use it to sign in")
sys.stderr.write("\n\n")
raise e
def _check_for_errors(json_response):
"""Check some JSON response for BotEngine errors"""
if not json_response:
raise BotError("No response from the server!", -1)
if json_response['resultCode'] > 0:
msg = "Unknown error!"
if 'resultCodeMessage' in json_response.keys():
msg = json_response['resultCodeMessage']
elif 'resultCodeDesc' in json_response.keys():
msg = json_response['resultCodeDesc']
raise BotError(msg, json_response['resultCode'])
    del json_response['resultCode']
class BotError(Exception):
"""BotEngine exception to raise and log errors."""
def __init__(self, msg, code):
        super(BotError, self).__init__(msg)
self.msg = msg
self.code = code
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
#===============================================================================
# Color Class for CLI
#===============================================================================
class Color:
"""Color your command line output text with Color.WHATEVER and Color.END"""
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
if __name__ == "__main__":
sys.exit(main())
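# Example invocation (hypothetical account, server and location values):
#   python set_status.py -u user@example.com -s app.presencepro.com -l 12345
# or, with an API key instead of a username/password:
#   python set_status.py -a <api_key> -s app.presencepro.com -l 12345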
| peoplepower/botlab | com.ppc.Microservices/intelligence/dashboard/tools/set_status.py | Python | apache-2.0 | 7,607 | 0.00723 |
# Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats)
import tarfile
import warnings
from test import test_support
from test.test_support import TESTFN, check_warnings, captured_stdout
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.childpath = os.path.join(TESTFN, 'a')
f = open(self.childpath, 'w')
f.close()
old_dir_mode = os.stat(TESTFN).st_mode
old_child_mode = os.stat(self.childpath).st_mode
# Make unwritable.
os.chmod(self.childpath, stat.S_IREAD)
os.chmod(TESTFN, stat.S_IREAD)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 2,
"Expected call to onerror function did not happen.")
# Make writable again.
os.chmod(TESTFN, old_dir_mode)
os.chmod(self.childpath, old_child_mode)
# Clean up.
shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 400, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState == 0:
if func is os.remove:
self.assertEqual(arg, self.childpath)
else:
self.assertIs(func, os.listdir,
"func must be either os.remove or os.listdir")
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
try:
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_data(os.path.join(dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
finally:
for path in (
os.path.join(src_dir, 'test.txt'),
os.path.join(dst_dir, 'test.txt'),
os.path.join(src_dir, 'test_dir', 'test.txt'),
os.path.join(dst_dir, 'test_dir', 'test.txt'),
):
if os.path.exists(path):
os.remove(path)
for path in (src_dir,
os.path.dirname(dst_dir)
):
if os.path.exists(path):
shutil.rmtree(path)
def test_copytree_with_exclude(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_data(join(src_dir, 'test.txt'), '123')
write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
                    elif os.path.splitext(path)[-1] in ('.py',):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
if hasattr(os, "symlink"):
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
f = open(src, 'w')
f.write('cheddar')
f.close()
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
try:
shutil.rmtree(TESTFN)
except OSError:
pass
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
if hasattr(os, "mkfifo"):
# Issue #3002: copyfile and copytree block indefinitely on named pipes
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
        if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
            self.skipTest("source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with captured_stdout() as s:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
        # creating something to zip
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
        # check if the zip archive was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@unittest.skipUnless(zlib, "Requires zlib")
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
# this works even if there's not gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
# Try to create a dir in the current directory, hoping that it is
# not located on the same filesystem as the system tmp dir.
try:
self.dir_other_fs = tempfile.mkdtemp(
dir=os.path.dirname(__file__))
self.file_other_fs = os.path.join(self.dir_other_fs,
filename)
except OSError:
self.dir_other_fs = None
with open(self.src_file, "wb") as f:
f.write("spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
_delete = False
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise IOError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise IOError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise IOError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
if os.path.exists(dst_dir):
os.rmdir(dst_dir)
def test_main():
test_support.run_unittest(TestShutil, TestMove, TestCopyFile)
if __name__ == '__main__':
test_main()
| ktan2020/legacy-automation | win/Lib/test/test_shutil.py | Python | mit | 30,473 | 0.001313 |
import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
        # assume we're at 0xdata with its hdfs namenode
h2o.init(java_heap_GB=14)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_mnist_A_training(self):
importFolderPath = "mnist"
csvFilelist = [
("mnist_training.csv.gz", 600),
("mnist_training.csv.gz", 600),
]
trial = 0
allDelta = []
for (csvFilename, timeoutSecs) in csvFilelist:
testKey2 = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath+"/"+csvFilename,
hex_key=testKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
def test_parse_mnist_B_testing(self):
importFolderPath = "mnist"
csvFilelist = [
("mnist_testing.csv.gz", 600),
("mnist_testing.csv.gz", 600),
]
trial = 0
allDelta = []
for (csvFilename, timeoutSecs) in csvFilelist:
testKey2 = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath+"/"+csvFilename,
hex_key=testKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
if __name__ == '__main__':
h2o.unit_main()
| vbelakov/h2o | py/testdir_single_jvm/test_parse_mnist_fvec.py | Python | apache-2.0 | 2,023 | 0.006426 |
shader_code = """
<script id="orbit_shader-vs" type="x-shader/x-vertex">
uniform vec3 focus;
uniform vec3 aef;
uniform vec3 omegaOmegainc;
attribute float lintwopi;
varying float lin;
uniform mat4 mvp;
const float M_PI = 3.14159265359;
void main() {
float a = aef.x;
float e = aef.y;
float f = aef.z+lintwopi;
lin = lintwopi/(M_PI*2.);
if (e>1.){
float theta_max = acos(-1./e);
f = 0.0001-theta_max+1.9998*lin*theta_max;
lin = sqrt(min(0.5,lin));
}
float omega = omegaOmegainc.x;
float Omega = omegaOmegainc.y;
float inc = omegaOmegainc.z;
float r = a*(1.-e*e)/(1. + e*cos(f));
float cO = cos(Omega);
float sO = sin(Omega);
float co = cos(omega);
float so = sin(omega);
float cf = cos(f);
float sf = sin(f);
float ci = cos(inc);
float si = sin(inc);
vec3 pos = vec3(r*(cO*(co*cf-so*sf) - sO*(so*cf+co*sf)*ci),r*(sO*(co*cf-so*sf) + cO*(so*cf+co*sf)*ci),+ r*(so*cf+co*sf)*si);
gl_Position = mvp*(vec4(focus+pos, 1.0));
}
</script>
<script id="orbit_shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying float lin;
void main() {
float fog = max(max(0.,-1.+2.*gl_FragCoord.z),max(0.,1.-2.*gl_FragCoord.z));
gl_FragColor = vec4(1.,1.,1.,sqrt(lin)*(1.-fog));
}
</script>
<script id="point_shader-vs" type="x-shader/x-vertex">
attribute vec3 vp;
uniform mat4 mvp;
//uniform vec4 vc;
//varying vec4 color;
void main() {
gl_PointSize = 15.0;
gl_Position = mvp*vec4(vp, 1.0);
//color = vc;
}
</script>
<script id="point_shader-fs" type="x-shader/x-fragment">
precision mediump float;
//varying vec4 color;
void main() {
vec2 rel = gl_PointCoord.st;
rel.s -=0.5;
rel.t -=0.5;
if (length(rel)>0.25){
gl_FragColor = vec4(0.,0.,0.,0.);
}else{
vec4 cmod = vec4(1.,1.,1.,1.);
float fog = max(max(0.,-1.+2.*gl_FragCoord.z),max(0.,1.-2.*gl_FragCoord.z));
cmod.a*= (1.-fog)*min(1.,1.-4.*(length(rel)/0.25-0.75));
gl_FragColor = cmod;
}
}
</script>
"""
js_code = """
<script>
function compileShader(glr, shaderSource, shaderType) {
// Create the shader object
var shader = glr.createShader(shaderType);
// Set the shader source code.
glr.shaderSource(shader, shaderSource);
// Compile the shader
glr.compileShader(shader);
// Check if it compiled
var success = glr.getShaderParameter(shader, glr.COMPILE_STATUS);
if (!success) {
// Something went wrong during compilation; get the error
throw "could not compile shader:" + glr.getShaderInfoLog(shader);
}
return shader;
}
function createShaderFromScript(glr, scriptId, opt_shaderType) {
// look up the script tag by id.
var shaderScript = document.getElementById(scriptId);
if (!shaderScript) {
throw("*** Error: unknown script element" + scriptId);
}
// extract the contents of the script tag.
var shaderSource = shaderScript.text;
// If we didn't pass in a type, use the 'type' from
// the script tag.
if (!opt_shaderType) {
if (shaderScript.type == "x-shader/x-vertex") {
opt_shaderType = glr.VERTEX_SHADER;
} else if (shaderScript.type == "x-shader/x-fragment") {
opt_shaderType = glr.FRAGMENT_SHADER;
} else if (!opt_shaderType) {
throw("*** Error: shader type not set");
}
}
return compileShader(glr, shaderSource, opt_shaderType);
};
function createProgramFromScripts( glr, vertexShaderId, fragmentShaderId) {
var vertexShader = createShaderFromScript(glr, vertexShaderId, glr.VERTEX_SHADER);
var fragmentShader = createShaderFromScript(glr, fragmentShaderId, glr.FRAGMENT_SHADER);
var program = glr.createProgram();
// attach the shaders.
glr.attachShader(program, vertexShader);
glr.attachShader(program, fragmentShader);
// link the program.
glr.linkProgram(program);
// Check if it linked.
var success = glr.getProgramParameter(program, glr.LINK_STATUS);
if (!success) {
// something went wrong with the link
throw ("program filed to link:" + glr.getProgramInfoLog (program));
}
return program;
}
function quat2mat(A,mat){
var xx = A.x*A.x; var xy = A.x*A.y; var xz = A.x*A.z;
var xw = A.x*A.w; var yy = A.y*A.y; var yz = A.y*A.z;
var yw = A.y*A.w; var zz = A.z*A.z; var zw = A.z*A.w;
mat[0] = 1.-2.*(yy+zz);
mat[1] = 2.*(xy-zw);
mat[2] = 2.*(xz+yw);
mat[4] = 2.*(xy+zw);
mat[5] = 1.-2.*(xx+zz);
mat[6] = 2.*(yz-xw);
mat[8] = 2.*(xz-yw);
mat[9] = 2.*(yz+xw);
mat[10]= 1.-2.*(xx+yy);
mat[3] = mat[7] = mat[11] = mat[12] = mat[13] = mat[14] = 0.; mat[15]= 1.;
}
function multvec(A, B, vecr){
var mat = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
quat2mat(A,mat);
vecr[0] = mat[0]*B[0] + mat[1]*B[1] + mat[2]*B[2];
vecr[1] = mat[4]*B[0] + mat[5]*B[1] + mat[6]*B[2];
vecr[2] = mat[8]*B[0] + mat[9]*B[1] + mat[10]*B[2];
}
function mattransp(mat){
var matt = [
mat[0], mat[4], mat[8], mat[12],
mat[1], mat[5], mat[9], mat[13],
mat[2], mat[6], mat[10], mat[14],
mat[3], mat[7], mat[11], mat[15]];
return matt;
}
function conjugate(quat){
var cquat = {x:-quat.x, y:-quat.y, z:-quat.z, w:quat.w};
return cquat;
}
function mult(A, B){
var mquat = { x: A.w*B.x + A.x*B.w + A.y*B.z - A.z*B.y,
y: A.w*B.y - A.x*B.z + A.y*B.w + A.z*B.x,
z: A.w*B.z + A.x*B.y - A.y*B.x + A.z*B.w,
w: A.w*B.w - A.x*B.x - A.y*B.y - A.z*B.z};
return mquat;
}
function normalize(quat){
var L = Math.sqrt(quat.x*quat.x + quat.y*quat.y + quat.z*quat.z + quat.w*quat.w);
var nquat = {x:quat.x/L, y:quat.y/L, z:quat.z/L, w:quat.w/L};
return nquat;
}
function matortho(mat, l, r, b, t, n, f){
mat[0] = 2./(r-l); mat[1] = 0.; mat[2] = 0.; mat[3] = -(r+l)/(r-l);
mat[4] = 0.; mat[5] = 2./(t-b); mat[6] = 0.; mat[7] = -(t+b)/(t-b);
mat[8] = 0.; mat[9] = 0.; mat[10] = -2./(f-n); mat[11] = -(f+n)/(f-n);
mat[12] = 0.; mat[13] = 0.; mat[14] = 0.; mat[15] = 1.;
}
function matmult(A,B,C){
for(i=0;i<4;i++){
for(j=0;j<4;j++){
C[i+4*j] = 0.;
for(k=0;k<4;k++){
C[i+4*j] += A[k+4*j]*B[i+4*k];
}}}
}
function startGL(reboundView) {
var canvas = document.getElementById("reboundcanvas-"+reboundView.cid);
if (!canvas){
reboundView.startCount = reboundView.startCount+1;
if (reboundView.startCount>1000){
console.log("Cannot find element.");
}else{
setTimeout(function(){ startGL(reboundView); }, 10);
}
return;
}
var rect = canvas.getBoundingClientRect()
reboundView.ratio = rect.width/rect.height;
reboundView.view = normalize({x:reboundView.orientation[0], y:reboundView.orientation[1], z:reboundView.orientation[2], w:reboundView.orientation[3]});
canvas.addEventListener('mousedown', function() {
reboundView.mouseDown=1;
}, false);
canvas.addEventListener('mouseup', function() {
reboundView.mouseDown=0;
}, false);
canvas.addEventListener('mouseleave', function() {
reboundView.mouseDown=0;
}, false);
canvas.addEventListener('mousemove', function(evt) {
var rect = canvas.getBoundingClientRect()
if (reboundView.mouseDown==1){
reboundView.mouseDown = 2;
reboundView.mouse_x = evt.clientX-rect.left;
reboundView.mouse_y = evt.clientY-rect.top;
return;
}else if (reboundView.mouseDown==2){
var width = rect.width;
var height = rect.height;
var dx = 3.*(evt.clientX-rect.left-reboundView.mouse_x)/width;
var dy = 3.*(evt.clientY-rect.top-reboundView.mouse_y)/height;
reboundView.mouse_x = evt.clientX-rect.left;
reboundView.mouse_y = evt.clientY-rect.top;
if (evt.shiftKey){
reboundView.scale *= (1.+dx+dy);
}else{
var inv = conjugate(reboundView.view);
var up = [0.,1.,0.];
var right = [1.,0.,0.];
var inv_up = [0.,0.,0.];
var inv_right = [0.,0.,0.];
multvec(inv, right, inv_right);
multvec(inv, up, inv_up);
var sin_dy = Math.sin(dy);
var rot_dy = {x:inv_right[0]*sin_dy, y:inv_right[1]*sin_dy, z:inv_right[2]*sin_dy, w:Math.cos(dy)};
reboundView.view = mult(reboundView.view, normalize(rot_dy));
var sin_dx = Math.sin(dx);
var rot_dx = {x:inv_up[0]*sin_dx, y:inv_up[1]*sin_dx, z:inv_up[2]*sin_dx, w:Math.cos(dx)};
reboundView.view = normalize(mult(reboundView.view, normalize(rot_dx)));
}
drawGL(reboundView);
}
}, false);
reboundView.gl = canvas.getContext("webgl")||canvas.getContext("experimental-webgl");
if (!reboundView.gl) {
alert("Unable to initialize WebGL. Your browser may not support it.");
return;
}
var gl = reboundView.gl
gl.enable(gl.BLEND);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
reboundView.orbit_shader_program = createProgramFromScripts(gl,"orbit_shader-vs","orbit_shader-fs");
reboundView.point_shader_program = createProgramFromScripts(gl,"point_shader-vs","point_shader-fs");
var lintwopi = new Float32Array(500);
for(i=0;i<500;i++){
lintwopi[i] = 2.*Math.PI/500.*i;
}
reboundView.orbit_lintwopi_buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.orbit_lintwopi_buffer);
gl.bufferData(gl.ARRAY_BUFFER, 4*500, gl.STATIC_DRAW);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, lintwopi)
reboundView.orbit_shader_mvp_location = gl.getUniformLocation(reboundView.orbit_shader_program,"mvp");
reboundView.orbit_shader_focus_location = gl.getUniformLocation(reboundView.orbit_shader_program,"focus");
reboundView.orbit_shader_aef_location = gl.getUniformLocation(reboundView.orbit_shader_program,"aef");
reboundView.orbit_shader_omegaOmegainc_location = gl.getUniformLocation(reboundView.orbit_shader_program,"omegaOmegainc");
reboundView.particle_data_buffer = gl.createBuffer();
gl.useProgram(reboundView.point_shader_program);
reboundView.point_shader_mvp_location = gl.getUniformLocation(reboundView.point_shader_program,"mvp");
updateRenderData(reboundView);
gl.clearColor(0.0, 0.0, 0.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
drawGL(reboundView);
}
function updateRenderData(reboundView){
var overlay = document.getElementById("reboundoverlay-"+reboundView.cid);
overlay.innerHTML = reboundView.model.get("overlay");
var previousN = reboundView.N;
reboundView.N = reboundView.model.get("N");
reboundView.t = reboundView.model.get("t");
reboundView.particle_data = reboundView.model.get('particle_data');
if (reboundView.orbits){
reboundView.orbit_data = reboundView.model.get('orbit_data');
}
var gl = reboundView.gl
if (reboundView.N>0){
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.particle_data_buffer);
gl.bufferData(gl.ARRAY_BUFFER, reboundView.N*7*4, gl.DYNAMIC_DRAW);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, reboundView.particle_data)
}
}
function drawGL(reboundView) {
if (!reboundView.gl){
return;
}
// Cleanup
var gl = reboundView.gl
gl.clearColor(0.0, 0.0, 0.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
// Draw
gl.useProgram(reboundView.point_shader_program);
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.particle_data_buffer);
var pvp = gl.getAttribLocation(reboundView.point_shader_program,"vp");
gl.enableVertexAttribArray(pvp);
gl.vertexAttribPointer(pvp, 3, gl.FLOAT, 0, 4*7,0); // 4 = size of float
var projection = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
if (reboundView.ratio>=1.){
matortho(projection,
-1.6*reboundView.scale, 1.6*reboundView.scale,
-1.6/reboundView.ratio*reboundView.scale, 1.6/reboundView.ratio*reboundView.scale,
-2.5*reboundView.scale, 2.5*reboundView.scale);
}else{
matortho(projection,
-1.6*reboundView.ratio*reboundView.scale, 1.6*reboundView.ratio*reboundView.scale,
-1.6*reboundView.scale, 1.6*reboundView.scale,
-2.5*reboundView.scale, 2.5*reboundView.scale);
}
var view = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
quat2mat(reboundView.view,view);
var mvp = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
matmult(projection,view,mvp);
gl.uniformMatrix4fv(reboundView.point_shader_mvp_location,false,mattransp(mvp));
gl.drawArrays(gl.POINTS,0,reboundView.N);
if (reboundView.orbits){
gl.useProgram(reboundView.orbit_shader_program);
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.orbit_lintwopi_buffer);
var ltp = gl.getAttribLocation(reboundView.orbit_shader_program,"lintwopi");
gl.enableVertexAttribArray(ltp);
gl.vertexAttribPointer(ltp, 1, gl.FLOAT, 0, 0,0); // 4 = size of float
gl.uniformMatrix4fv(reboundView.orbit_shader_mvp_location,false,mattransp(mvp));
// Need to do this one by one
// because WebGL is not supporting
// instancing:
for(i=0;i<reboundView.N-1;i++){
var focus = new Float32Array(reboundView.orbit_data.buffer,4*9*i,3);
gl.uniform3fv(reboundView.orbit_shader_focus_location,focus);
var aef = new Float32Array(reboundView.orbit_data.buffer,4*(9*i+3),3);
gl.uniform3fv(reboundView.orbit_shader_aef_location,aef);
var omegaOmegainc = new Float32Array(reboundView.orbit_data.buffer,4*(9*i+6),3);
gl.uniform3fv(reboundView.orbit_shader_omegaOmegainc_location,omegaOmegainc);
gl.drawArrays(gl.LINE_STRIP,0,500);
}
}
}
require.undef('rebound');
define('rebound', ["@jupyter-widgets/base"], function(widgets) {
var ReboundView = widgets.DOMWidgetView.extend({
render: function() {
this.el.innerHTML = '<span style="display: inline-block; position: relative;" width="'+this.model.get("width")+'" height="'+this.model.get("height")+'"><canvas style="border: none;" id="reboundcanvas-'+this.cid+'" width="'+this.model.get("width")+'" height="'+this.model.get("height")+'"></canvas><span style="position: absolute; color: #FFF; pointer-events:none; bottom:5px; right:0px; padding-right:5px; font-family: monospace;" id="reboundoverlay-'+this.cid+'">REBOUND</span></span>';
this.model.on('change:t', this.trigger_refresh, this);
this.model.on('change:count', this.trigger_refresh, this);
this.model.on('change:screenshotcount', this.take_screenshot, this);
this.startCount = 0;
this.gl = null;
// Only copy those once
this.scale = this.model.get("scale");
this.width = this.model.get("width");
this.height = this.model.get("height");
this.orbits = this.model.get("orbits");
this.orientation = this.model.get("orientation");
startGL(this);
},
take_screenshot: function() {
drawGL(this);
var canvas = document.getElementById("reboundcanvas-"+this.cid);
var img = canvas.toDataURL("image/png");
this.model.set("screenshot",img, {updated_view: this});
this.touch();
},
trigger_refresh: function() {
updateRenderData(this);
drawGL(this);
},
});
return {
ReboundView: ReboundView
};
});
</script>
"""
import ipywidgets
ipywidgets_major_version = int((ipywidgets.__version__).split(".")[0])
if ipywidgets_major_version<7:
js_code = js_code.replace("@jupyter-widgets/base", "jupyter-js-widgets")
js_code = js_code.replace(".cid", ".id")
from ipywidgets import DOMWidget
import traitlets
import math
import base64
import sys
from ctypes import c_float, byref, create_string_buffer, c_int, c_char, pointer
from . import clibrebound
def savescreenshot(change):
if len(change["new"]) and change["type"] =="change":
w = change["owner"]
bd = base64.b64decode(change["new"].split(",")[-1])
if sys.version_info[0] < 3:
with open(w.screenshotprefix+"%05d.png"%w.screenshotcountall, 'w') as f:
f.write(bd)
else:
with open(w.screenshotprefix+"%05d.png"%w.screenshotcountall, 'bw') as f:
f.write(bd)
w.screenshotcountall += 1
if len(w.times)>w.screenshotcount:
nexttime = w.times[w.screenshotcount]
if w.archive:
sim = w.archive.getSimulation(w.times[w.screenshotcount],mode=w.mode)
w.refresh(pointer(sim))
else:
w.simp.contents.integrate(w.times[w.screenshotcount])
w.screenshotcount += 1
else:
w.unobserve(savescreenshot)
w.times = None
w.screenshotprefix = None
class Widget(DOMWidget):
_view_name = traitlets.Unicode('ReboundView').tag(sync=True)
_view_module = traitlets.Unicode('rebound').tag(sync=True)
count = traitlets.Int(0).tag(sync=True)
screenshotcount = traitlets.Int(0).tag(sync=True)
t = traitlets.Float().tag(sync=True)
N = traitlets.Int().tag(sync=True)
    overlay = traitlets.Unicode('REBOUND').tag(sync=True)
width = traitlets.Float().tag(sync=True)
height = traitlets.Float().tag(sync=True)
scale = traitlets.Float().tag(sync=True)
particle_data = traitlets.CBytes(allow_none=True).tag(sync=True)
orbit_data = traitlets.CBytes(allow_none=True).tag(sync=True)
orientation = traitlets.Tuple().tag(sync=True)
orbits = traitlets.Int().tag(sync=True)
screenshot = traitlets.Unicode().tag(sync=True)
def __init__(self,simulation,size=(200,200),orientation=(0.,0.,0.,1.),scale=None,autorefresh=True,orbits=True, overlay=True):
"""
Initializes a Widget.
Widgets provide real-time 3D interactive visualizations for REBOUND simulations
within Jupyter Notebooks. To use widgets, the ipywidgets package needs to be installed
and enabled in your Jupyter notebook server.
Parameters
----------
size : (int, int), optional
            Specify the size of the widget in pixels. The default is 200 by 200 pixels.
orientation : (float, float, float, float), optional
Specify the initial orientation of the view. The four floats correspond to the
x, y, z, and w components of a quaternion. The quaternion will be normalized.
scale : float, optional
Set the initial scale of the view. If not set, the widget will determine the
scale automatically based on current particle positions.
autorefresh : bool, optional
            The default value is True. The view is updated whenever a particle is added,
removed and every 100th of a second while a simulation is running. If set
to False, then the user needs to manually call the refresh() function on the
widget. This might be useful if performance is an issue.
orbits : bool, optional
The default value for this is True and the widget will draw the instantaneous
orbits of the particles. For simulations in which particles are not on
Keplerian orbits, the orbits shown will not be accurate.
        overlay : str or bool, optional
Change the default text overlay. Set to None to hide all text.
"""
self.screenshotcountall = 0
self.width, self.height = size
self.t, self.N = simulation.t, simulation.N
self.orientation = orientation
self.autorefresh = autorefresh
self.orbits = orbits
self.useroverlay = overlay
self.simp = pointer(simulation)
clibrebound.reb_display_copy_data.restype = c_int
if scale is None:
self.scale = simulation.display_data.contents.scale
else:
self.scale = scale
self.count += 1
super(Widget, self).__init__()
def refresh(self, simp=None, isauto=0):
"""
Manually refreshes a widget.
Note that this function can also be called using the wrapper function of
the Simulation object: sim.refreshWidgets().
"""
if simp==None:
simp = self.simp
if self.autorefresh==0 and isauto==1:
return
sim = simp.contents
size_changed = clibrebound.reb_display_copy_data(simp)
clibrebound.reb_display_prepare_data(simp,c_int(self.orbits))
if sim.N>0:
self.particle_data = (c_char * (4*7*sim.N)).from_address(sim.display_data.contents.particle_data).raw
if self.orbits:
self.orbit_data = (c_char * (4*9*(sim.N-1))).from_address(sim.display_data.contents.orbit_data).raw
if size_changed:
#TODO: Implement better GPU size change
pass
if self.useroverlay==True:
self.overlay = "REBOUND (%s), N=%d, t=%g"%(sim.integrator,sim.N,sim.t)
elif self.useroverlay==None or self.useroverlay==False:
self.overlay = ""
else:
self.overlay = self.useroverlay + ", N=%d, t=%g"%(sim.N,sim.t)
self.N = sim.N
self.t = sim.t
self.count += 1
def takeScreenshot(self, times=None, prefix="./screenshot", resetCounter=False, archive=None,mode="snapshot"):
"""
Take one or more screenshots of the widget and save the images to a file.
The images can be used to create a video.
This function cannot be called multiple times within one cell.
Note: this is a new feature and might not work on all systems.
It was tested on python 2.7.10 and 3.5.2 on MacOSX.
Parameters
----------
times : (float, list), optional
If this argument is not given a screenshot of the widget will be made
as it is (without integrating the simulation). If a float is given, then the
simulation will be integrated to that time and then a screenshot will
be taken. If a list of floats is given, the simulation will be integrated
to each time specified in the array. A separate screenshot for
each time will be saved.
prefix : (str), optional
This string will be part of the output filename for each image.
            Followed by a five-digit integer and the suffix .png. By default the
            prefix is './screenshot' which outputs images in the current
            directory with the filenames screenshot00000.png, screenshot00001.png...
Note that the prefix can include a directory.
resetCounter : (bool), optional
Resets the output counter to 0.
archive : (rebound.SimulationArchive), optional
Use a REBOUND SimulationArchive. Thus, instead of integratating the
Simulation from the current time, it will use the SimulationArchive
to load a snapshot. See examples for usage.
mode : (string), optional
Mode to use when querying the SimulationArchive. See SimulationArchive
documentation for details. By default the value is "snapshot".
Examples
--------
First, create a simulation and widget. All of the following can go in
one cell.
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> w = sim.getWidget()
>>> w
The widget should show up. To take a screenshot, simply call
>>> w.takeScreenshot()
A new file with the name screenshot00000.png will appear in the
current directory.
Note that the takeScreenshot command needs to be in a separate cell,
i.e. after you see the widget.
You can pass an array of times to the function. This allows you to
take multiple screenshots, for example to create a movie,
>>> times = [0,10,100]
>>> w.takeScreenshot(times)
"""
self.archive = archive
if resetCounter:
self.screenshotcountall = 0
self.screenshotprefix = prefix
self.screenshotcount = 0
self.overlay = "REBOUND"
self.screenshot = ""
if archive is None:
if times is None:
times = self.simp.contents.t
try:
# List
len(times)
except:
# Float:
times = [times]
self.times = times
self.observe(savescreenshot,names="screenshot")
self.simp.contents.integrate(times[0])
self.screenshotcount += 1 # triggers first screenshot
else:
if times is None:
raise ValueError("Need times argument for archive mode.")
try:
len(times)
except:
raise ValueError("Need a list of times for archive mode.")
self.times = times
self.mode = mode
self.observe(savescreenshot,names="screenshot")
sim = archive.getSimulation(times[0],mode=mode)
self.refresh(pointer(sim))
self.screenshotcount += 1 # triggers first screenshot
@staticmethod
def getClientCode():
return shader_code + js_code
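# A minimal usage sketch (assumes a Jupyter notebook with ipywidgets installed and
# enabled; mirrors the takeScreenshot docstring above). That sim.getWidget() forwards
# keyword arguments such as size/orbits to Widget() is an assumption here:
#   import rebound
#   sim = rebound.Simulation()
#   sim.add(m=1.)
#   sim.add(m=1.e-3, x=1., vy=1.)
#   w = sim.getWidget(size=(400, 400), orbits=True)
#   w   # displaying the widget renders the WebGL view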
| dtamayo/rebound | rebound/widget.py | Python | gpl-3.0 | 25,951 | 0.006127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re
import os
import sys
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search(
"^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
package = 'iosfu'
version = get_version(package)
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
args = {'version': version}
print("You probably want to also tag the version now:")
print(" git tag -a %(version)s -m 'version %(version)s'" % args)
print(" git push --tags")
sys.exit()
setup(
name='iosfu',
version=version,
url='http://github.com/fmartingr/iosfu',
license='MIT',
description='iOS Forensics Utility',
author='Felipe Martin',
author_email='fmartingr@me.com',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=open('requirements.txt').read().split('\n'),
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
'Topic :: Security',
]
)
| fmartingr/iosfu | setup.py | Python | mit | 1,441 | 0 |
# Least common ancestor Problem
#http://www.ics.uci.edu/~eppstein/261/BenFar-LCA-00.pdf
# http://code.activestate.com/recipes/498243-finding-eulerian-path-in-undirected-graph/
# http://codereview.stackexchange.com/questions/104074/eulerian-tour-in-python
# Store Arrays: Parents: P[i] is the parent of i
# Weights: W[i] is the length of tunnel i
# Linearize the tree:
# 1. Store nodes visited on an Eulerian tour of the tree: E[i] O(n)
# 2. Node level: distance from the root. Compute L[i] O(n)
# 3. Representative of a node: first visit in the Eulerian tour. Compute R[i]. O(n)
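# Illustrative example (not part of the original notes): for a tree with edges
# 0-1 and 0-2 rooted at 0, one possible Euler tour is E = [0, 1, 0, 2, 0], the
# visit levels are L = [0, 1, 0, 1, 0] and the first-visit indices are
# R = [0, 1, 3]; lca(1, 2) then reduces to E[argmin(L[1..3])] = E[2] = 0.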
from collections import defaultdict
import sys
class Graph:
def __init__(self,vertices):
self.V = vertices #No. of vertices
self.weight = [0] * self.V # An array. length[i] = length of tunnel from node i to its parent
self.parent = [0] * self.V # Another array. Parent[i] = parent hill of hill i.
self.children = defaultdict(list)
self.level = [-1]*self.V # Distance from the node to the root
self.E = list() # nodes visited on the Eulerian tour of the tree
self.L = list()
self.R = list() # First visit of i on the Eulerian tour
self.RMQ = dict()
self.depth = 0
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def eulerTour(self, current):
queue = [current]
self.E = [current]
current = self.graph[current].pop()
while(queue):
if self.graph[current]:
queue.append(current)
current = self.graph[current].pop()
else:
current = queue.pop()
self.E.append(current)
#print(self.E)
def findDepth(self, curr, level):
self.level[curr] = level
for v in self.children[curr]:
if self.level[v] == -1 :
self.findDepth(v, level+1)
self.level
def makeR(self):
"""
Create array R
"""
for v in range(0, self.V):
self.R.append(self.E.index(v))
def rmq(self, L, j, k):
"""
        Return the index of the minimum of L between positions j and k (inclusive).
        Results are memoized in self.RMQ.
"""
if (j, k) in self.RMQ:
return self.RMQ[(j, k)]
if (j+1 == k):
if self.L[j] < self.L[k]:
self.RMQ[(j, k)] = j
return j
else:
self.RMQ[(j, k)] = k
return k
for i in range(j+1, k):
left = self.rmq(L, j, i)
right = self.rmq(L, i, k)
if (L[left] < L[right]):
self.RMQ[(j, k)] = left
return left
else:
self.RMQ[(j, k)] = right
return right
def lca(self, u, v):
"""
The nodes in the Euler tour between the first visits to u and v are E[R[u], .... E[R[v]]
The shallowest node in this subtour is at index RMQ(R[u], R[v]) (since L records the level)
The node at this position is E[RMQ(R[u], R[v])]
"""
j = self.R[u]
k = self.R[v]
if j > k:
return(self.lca(v, u))
i = self.rmq(self.L, j, k)
return i
def WStr(self):
string = "W:"
for i in range(0, len(self.weight)):
string += " {}".format(self.weight[i])
return string
def RStr(self):
string = "R:"
for i in range(0, len(self.R)):
string += " {}".format(self.R[i])
return string
def LevelStr(self):
string = "L:"
for i in range(0, len(self.L)):
string += " {}".format(self.L[i])
return string
def EulerStr(self):
string = "E:"
for i in range(0, len(self.E)):
string += " {}".format(self.E[i])
return string
def parentsStr(self):
string = "parents: \n"
for v in range(0, self.V):
string += "{}: {}, w:{}\n".format(v, self.parent[v], self.weight[v])
return string
def childrenStr(self):
string = "children: \n"
for v in range(0, self.V):
string += "{}:".format(v)
for c in range(0, len(self.children[v])):
string += " {}".format(self.children[v][c])
string += "\n"
return string
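# Input format (inferred from load() below): each test case starts with V, the
# number of nodes; the next V-1 lines give "parent weight" for nodes 1..V-1;
# then Q, the number of queries, followed by Q lines "u v"; a value of V = 0
# ends the input. For example, the case "3 / 0 2 / 0 3 / 1 / 1 2" (one value
# or pair per line) asks for the distance between nodes 1 and 2, which is
# 2 + 3 = 5.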
def load():
V = int(next(sys.stdin))
#sys.stdout.write("V: {}\n".format(V))
i = 1
while(V != 0):
g = Graph(V)
while(i<V):
line = next(sys.stdin).split()
parent = int(line[0])
weight = int(line[1])
g.addEdge(i, parent)
g.parent[i] = parent
g.children[parent].append(i)
g.weight[i] = weight
i = i + 1
Q = int(next(sys.stdin))
queries = list()
i = 0
while(i < Q):
line = next(sys.stdin).split()
q1 = int(line[0])
q2 = int(line[1])
queries.append((q1, q2))
i = i + 1
yield(g, queries)
V = int(next(sys.stdin))
i = 1
for (g, q) in load():
g.eulerTour(0)
try:
g.findDepth(0, 0)
except Exception:
quit()
for e in g.E:
g.L.append(g.level[e])
g.makeR()
for i in range(0, g.V-1):
for j in range(1, g.V):
g.lca(j, i)
first = 0
for i in range(0, len(q)):
v = q[i][0]
w = q[i][1]
i = g.lca(v, w)
ancestor = g.E[i]
path_length = 0
curr = v
while(curr != ancestor):
child = curr
parent = g.parent[curr]
parent_level = g.L[g.R[parent]]
path_length = path_length + g.weight[curr]
curr = parent
curr = w
while(curr != ancestor):
child = curr
parent = g.parent[curr]
parent_level = g.L[g.R[parent]]
path_length = path_length + g.weight[curr]
curr = parent
if first == 0:
sys.stdout.write("{}".format(path_length))
first = 1
else:
sys.stdout.write(" {}".format(path_length))
sys.stdout.write("\n")
| tristan-hunt/UVaProblems | DataStructures/ac.py | Python | gpl-3.0 | 6,305 | 0.00571 |
# This should eventually land in telepathy-python, so has the same license:
# Copyright (C) 2007 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__all__ = ('TubeConnection',)
__docformat__ = 'reStructuredText'
import logging
from dbus.connection import Connection
from dbus import PROPERTIES_IFACE
from telepathy.interfaces import CHANNEL_TYPE_DBUS_TUBE
logger = logging.getLogger('telepathy.tubeconn')
class TubeConnection(Connection):
def __new__(cls, conn, tube, address, group_iface=None, mainloop=None):
self = super(TubeConnection, cls).__new__(cls, address,
mainloop=mainloop)
self._tube = tube
self.participants = {}
self.bus_name_to_handle = {}
self._mapping_watches = []
if group_iface is None:
method = conn.GetSelfHandle
else:
method = group_iface.GetSelfHandle
method(reply_handler=self._on_get_self_handle_reply,
error_handler=self._on_get_self_handle_error)
return self
def _on_get_self_handle_reply(self, handle):
self.self_handle = handle
match = self._tube[CHANNEL_TYPE_DBUS_TUBE].connect_to_signal('DBusNamesChanged',
self._on_dbus_names_changed)
self._tube[PROPERTIES_IFACE].Get(CHANNEL_TYPE_DBUS_TUBE, 'DBusNames',
reply_handler=self._on_get_dbus_names_reply,
error_handler=self._on_get_dbus_names_error)
self._dbus_names_changed_match = match
def _on_get_self_handle_error(self, e):
logging.basicConfig()
logger.error('GetSelfHandle failed: %s', e)
def close(self):
self._dbus_names_changed_match.remove()
self._on_dbus_names_changed((), self.participants.keys())
super(TubeConnection, self).close()
def _on_get_dbus_names_reply(self, names):
self._on_dbus_names_changed(names, ())
def _on_get_dbus_names_error(self, e):
logging.basicConfig()
logger.error('Get DBusNames property failed: %s', e)
def _on_dbus_names_changed(self, added, removed):
for handle, bus_name in added.items():
if handle == self.self_handle:
# I've just joined - set my unique name
self.set_unique_name(bus_name)
self.participants[handle] = bus_name
self.bus_name_to_handle[bus_name] = handle
# call the callback while the removed people are still in
# participants, so their bus names are available
for callback in self._mapping_watches:
callback(added, removed)
for handle in removed:
bus_name = self.participants.pop(handle, None)
self.bus_name_to_handle.pop(bus_name, None)
def watch_participants(self, callback):
self._mapping_watches.append(callback)
if self.participants:
# GetDBusNames already returned: fake a participant add event
# immediately
added = []
for k, v in self.participants.iteritems():
added.append((k, v))
callback(added, [])
| max-posedon/telepathy-python | examples/tubeconn.py | Python | lgpl-2.1 | 3,866 | 0.001035 |
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
from __future__ import division
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized, defaultdict
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.fixes import rankdata
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d. For exhaustive searches, use "
"GridSearchCV." % (grid_size, self.n_iter))
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
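    """Sanity-check a param_grid: each value must be a non-empty list, tuple or 1-D array."""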
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
# XXX Remove in 0.20
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce the __repr__ method
    # would normally also reintroduce the __dict__ on the instance. We avoid
    # that by telling the Python interpreter that this subclass uses static
    # __slots__ instead of dynamic attributes. Furthermore we don't need any
    # additional slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
return self.scorer_(self.best_estimator_, X, y)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError(('This GridSearchCV instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. ') % method_name)
else:
check_is_fitted(self, 'best_estimator_')
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, labels, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
X, y, labels = indexable(X, y, labels)
n_splits = cv.get_n_splits(X, y, labels)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv.split(X, y, labels))
test_scores, test_sample_counts, _, parameters = zip(*out)
candidate_params = parameters[::n_splits]
n_candidates = len(candidate_params)
test_scores = np.array(test_scores,
dtype=np.float64).reshape(n_candidates,
n_splits)
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
# Computed the (weighted) mean and std for all the candidates
weights = test_sample_counts if self.iid else None
means = np.average(test_scores, axis=1, weights=weights)
stds = np.sqrt(np.average((test_scores - means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results = dict()
for split_i in range(n_splits):
results["test_split%d_score" % split_i] = test_scores[:, split_i]
results["test_mean_score"] = means
results["test_std_score"] = stds
ranks = np.asarray(rankdata(-means, method='min'), dtype=np.int32)
best_index = np.flatnonzero(ranks == 1)[0]
best_parameters = candidate_params[best_index]
results["test_rank_score"] = ranks
# Use one np.MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(np.ma.masked_all, (n_candidates,),
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
self.results_ = results
self.best_index_ = best_index
self.n_splits_ = n_splits
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best_parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
@property
def best_params_(self):
check_is_fitted(self, 'results_')
return self.results_['params'][self.best_index_]
@property
def best_score_(self):
check_is_fitted(self, 'results_')
return self.results_['test_mean_score'][self.best_index_]
@property
def grid_scores_(self):
warnings.warn(
"The grid_scores_ attribute was deprecated in version 0.18"
" in favor of the more elaborate results_ attribute."
" The grid_scores_ attribute will not be available from 0.20",
DeprecationWarning)
check_is_fitted(self, 'results_')
grid_scores = list()
for i, (params, mean, std) in enumerate(zip(
self.results_['params'],
self.results_['test_mean_score'],
self.results_['test_std_score'])):
scores = np.array(list(self.results_['test_split%d_score' % s][i]
for s in range(self.n_splits_)),
dtype=np.float64)
grid_scores.append(_CVScoreTuple(params, mean, scores))
return grid_scores
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
>>> sorted(clf.results_.keys())
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
['param_C', 'param_kernel', 'params', 'test_mean_score',...
'test_rank_score', 'test_split0_score', 'test_split1_score',...
'test_split2_score', 'test_std_score']
Attributes
----------
results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|test_split0_score|...|...rank..|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'test_split0_score' : [0.8, 0.7, 0.8, 0.9],
'test_split1_score' : [0.82, 0.5, 0.7, 0.78],
'test_mean_score' : [0.81, 0.60, 0.75, 0.82],
'test_std_score' : [0.02, 0.01, 0.03, 0.03],
'test_rank_score' : [2, 4, 3, 1],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, labels=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, labels, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | test_split0_score |...|test_rank_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.8 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.9 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.7 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``results_`` dict of::
{
            'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'test_split0_score' : [0.8, 0.9, 0.7],
'test_split1_score' : [0.82, 0.5, 0.7],
'test_mean_score' : [0.81, 0.7, 0.7],
'test_std_score' : [0.02, 0.2, 0.],
'test_rank_score' : [3, 1, 1],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
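    Examples
    --------
    A minimal, illustrative sketch (the estimator, distribution and dataset
    below are arbitrary choices for demonstration, not requirements of this
    class):
    >>> from scipy.stats import expon
    >>> from sklearn import datasets
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.model_selection import RandomizedSearchCV
    >>> iris = datasets.load_iris()
    >>> search = RandomizedSearchCV(LogisticRegression(),
    ...                             {'C': expon(scale=10)},
    ...                             n_iter=5, random_state=0)
    >>> search = search.fit(iris.data, iris.target)
    >>> sorted(search.best_params_.keys())
    ['C']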
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None, labels=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, labels, sampled_params)
| ClimbsRocks/scikit-learn | sklearn/model_selection/_search.py | Python | bsd-3-clause | 44,853 | 0.000067 |
from flask import current_app, Blueprint, render_template
import util
from database import db_uri
admin = Blueprint("admin", __name__, template_folder="templates")
@admin.route("/", methods=["GET"])
@util.login_required("operator")
def index():
"""
    The index page for the admin interface; it contains
    a list of links to the other admin pages.
"""
info = {"Database URI": db_uri, "Run Mode": current_app.config["RUNMODE"]}
return render_template("admin_index.html", info=info)
| BenDoan/code_court | code_court/courthouse/views/admin/admin.py | Python | mit | 497 | 0 |
# encoding: UTF-8
'''
This module implements the market data recording engine: it aggregates tick data,
builds K-line (bar) data and inserts it into the database.
DR_setting.json configures which contracts to record and the active contract codes.
History
<id> <author> <description>
2017050300      hetajen       Bat[Auto CTP connection][Auto symbol subscription][Auto DB insertion][Auto CTA loading]
2017050301      hetajen       DB[Added daily-bar retrieval to CtaTemplate][Mongo no longer stores tick data][Added Sina as a data source]
2017051500      hetajen       Added a tradingDay field to night-session tick|bar data to indicate their real trading day
2017052500      hetajen       DB[Added recording, storage and retrieval of 5-minute bar data]
'''
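# An illustrative DR_setting.json (the keys match what loadSetting() reads below;
# the symbols and gateway name are placeholders, not shipped defaults):
# {
#     "working": true,
#     "tick": [["rb1710", "CTP"]],
#     "bar": [["rb1710", "CTP"]],
#     "active": {"rb0000": "rb1710"}
# }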
import json
import os
import copy
from collections import OrderedDict
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread
from eventEngine import *
from vtGateway import VtSubscribeReq, VtLogData
from drBase import *
from vtFunction import todayDate
from language import text
'''2017050300 Add by hetajen begin'''
'''2017052500 Add by hetajen begin'''
from ctaHistoryData import XH_HistoryDataEngine, HistoryDataEngine
'''2017052500 Add by hetajen end'''
'''2017050300 Add by hetajen end'''
########################################################################
class DrEngine(object):
"""数据记录引擎"""
settingFileName = 'DR_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
settingFileName = os.path.join(path, settingFileName)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Current date
        self.today = todayDate()
        # Active-contract mapping dict: key is the specific contract code (e.g. IF1604), value is the active contract code (e.g. IF0000)
        self.activeSymbolDict = {}
        # Tick object dictionary
        self.tickDict = {}
        # Bar (K-line) object dictionary
        self.barDict = {}
        # State for the dedicated database-insertion thread
        self.active = False                     # working flag
        self.queue = Queue()                    # task queue
        self.thread = Thread(target=self.run)   # worker thread
        # Load settings and subscribe to market data
        self.loadSetting()
#----------------------------------------------------------------------
def loadSetting(self):
"""载入设置"""
with open(self.settingFileName) as f:
drSetting = json.load(f)
            # If "working" is set to False, do not start market data recording
working = drSetting['working']
if not working:
return
if 'tick' in drSetting:
l = drSetting['tick']
for setting in l:
symbol = setting[0]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = setting[0]
                    # For the LTS and IB interfaces, subscribing to market data requires the exchange code
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
                    # For the IB interface, subscribing to market data requires currency and product class
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, setting[1])
                    drTick = DrTickData()           # this tick instance can be used to cache some data (currently unused)
self.tickDict[vtSymbol] = drTick
if 'bar' in drSetting:
l = drSetting['bar']
for setting in l:
symbol = setting[0]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = symbol
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, setting[1])
bar = DrBarData()
self.barDict[vtSymbol] = bar
if 'active' in drSetting:
d = drSetting['active']
                # Note: for the IB and LTS interfaces, vtSymbol here should be suffixed with ".exchange"
for activeSymbol, vtSymbol in d.items():
self.activeSymbolDict[vtSymbol] = activeSymbol
        # Start the database-insertion thread
self.start()
        # Register event listeners
self.registerEvent()
#----------------------------------------------------------------------
def procecssTickEvent(self, event):
"""处理行情推送"""
tick = event.dict_['data']
vtSymbol = tick.vtSymbol
        # Convert tick format
drTick = DrTickData()
d = drTick.__dict__
for key in d.keys():
if key != 'datetime':
d[key] = tick.__getattribute__(key)
drTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
        # Update tick data
if vtSymbol in self.tickDict:
'''2017050301 Delete by hetajen begin'''
# self.insertData(TICK_DB_NAME, vtSymbol, drTick)
#
# if vtSymbol in self.activeSymbolDict:
# activeSymbol = self.activeSymbolDict[vtSymbol]
# self.insertData(TICK_DB_NAME, activeSymbol, drTick)
'''2017050301 Delete by hetajen end'''
            # Emit a log entry
self.writeDrLog(text.TICK_LOGGING_MESSAGE.format(symbol=drTick.vtSymbol,
time=drTick.time,
last=drTick.lastPrice,
bid=drTick.bidPrice1,
ask=drTick.askPrice1))
        # Update minute-bar data
if vtSymbol in self.barDict:
bar = self.barDict[vtSymbol]
            # If this is the first tick or a new minute has started
if not bar.datetime or bar.datetime.minute != drTick.datetime.minute:
if bar.vtSymbol:
'''2017050301 Delete by hetajen begin'''
# newBar = copy.copy(bar)
# self.insertData(MINUTE_DB_NAME, vtSymbol, newBar)
# if vtSymbol in self.activeSymbolDict:
# activeSymbol = self.activeSymbolDict[vtSymbol]
# self.insertData(MINUTE_DB_NAME, activeSymbol, newBar)
'''2017050301 Delete by hetajen end'''
self.writeDrLog(text.BAR_LOGGING_MESSAGE.format(symbol=bar.vtSymbol,
time=bar.time,
open=bar.open,
high=bar.high,
low=bar.low,
close=bar.close))
bar.vtSymbol = drTick.vtSymbol
bar.symbol = drTick.symbol
bar.exchange = drTick.exchange
bar.open = drTick.lastPrice
bar.high = drTick.lastPrice
bar.low = drTick.lastPrice
bar.close = drTick.lastPrice
'''2017051500 Add by hetajen begin'''
bar.tradingDay = drTick.tradingDay
bar.actionDay = drTick.actionDay
'''2017051500 Add by hetajen end'''
bar.date = drTick.date
bar.time = drTick.time
bar.datetime = drTick.datetime
bar.volume = drTick.volume
bar.openInterest = drTick.openInterest
            # Otherwise keep accumulating the current bar
else:
bar.high = max(bar.high, drTick.lastPrice)
bar.low = min(bar.low, drTick.lastPrice)
bar.close = drTick.lastPrice
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
# ----------------------------------------------------------------------
'''2017050300 Add by hetajen begin'''
def insertDailyBar(self):
e = XH_HistoryDataEngine()
for vtSymbol in self.barDict:
e.downloadFuturesDailyBarSina(vtSymbol)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
e.downloadFuturesDailyBarSina(activeSymbol)
'''2017050300 Add by hetajen end'''
# ----------------------------------------------------------------------
'''2017052500 Add by hetajen begin'''
def insert5MinBar(self):
e = XH_HistoryDataEngine()
for vtSymbol in self.barDict:
e.downloadFutures5MinBarSina(vtSymbol)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
e.downloadFutures5MinBarSina(activeSymbol)
def insertTradeCal(self):
e = HistoryDataEngine()
e.loadTradeCal()
# e.downloadTradeCal()
'''2017052500 Add by hetajen end'''
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是CtaTickData或者CtaBarData)"""
self.queue.put((dbName, collectionName, data.__dict__))
#----------------------------------------------------------------------
def run(self):
"""运行插入线程"""
while self.active:
try:
dbName, collectionName, d = self.queue.get(block=True, timeout=1)
self.mainEngine.dbInsert(dbName, collectionName, d)
except Empty:
pass
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.active = True
self.thread.start()
#----------------------------------------------------------------------
def stop(self):
"""退出"""
if self.active:
self.active = False
self.thread.join()
#----------------------------------------------------------------------
def writeDrLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_DATARECORDER_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
| hetajen/vnpy161 | vn.trader/dataRecorder/drEngine.py | Python | mit | 11,808 | 0.008268 |
from models import Likes
def filterKey(key):
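    """Return the id of the given ndb Key."""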
return key.id()
def showCount(post_key):
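    """Return the post's like count, or the string "0" if no Likes entity exists."""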
like_obj = Likes.query(Likes.post == post_key).get()
if like_obj:
return like_obj.like_count
else:
return "0"
| ghoshabhi/Multi-User-Blog | utility/filters.py | Python | mit | 230 | 0.008696 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Spaghetti: Web Server Security Scanner
#
# @url: https://github.com/m4ll0k/Spaghetti
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'doc/LICENSE'
from lib.net import http
from lib.utils import printer
from lib.net import utils
class AdminInterfaces():
def __init__(self,url,agent,proxy,redirect):
self.url = url
self.printer = printer.Printer()
self.http = http.Http(agent=agent,proxy=proxy,redirect=redirect)
self.check = utils.Checker()
def Run(self):
info = {
'name':'Common administration interfaces',
'author':'Momo Outaadi (M4ll0k)',
'description':'Access to administration interfaces panel'
}
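		# data/AdminPanels.txt is expected to hold one candidate admin path per line
		# (e.g. "admin/" or "administrator/index.php"); this is inferred from the
		# parsing below -- the file itself is not part of this snippet.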
dbadmin = open('data/AdminPanels.txt','rb')
dbfiles = list([x.split('\n') for x in dbadmin])
for x in dbfiles:
try:
resp = self.http.Send(self.check.Path(self.url,x[0]))
if resp._content and resp.status_code == 200:
if resp.url == self.check.Path(self.url,x[0]):
self.printer.plus('Admin interface: %s'%(resp.url))
break
else:
pass
except Exception as ERROR:
				pass
| Yukinoshita47/Yuki-Chan-The-Auto-Pentest | Module/Spaghetti/modules/discovery/AdminInterfaces.py | Python | mit | 1,102 | 0.040835 |
"""This example follows the simple text document Pipeline illustrated in the figures above.
"""
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
# The original snippet assumes a pre-existing `spark` session (e.g. in spark-shell);
# create one explicitly so the example also runs standalone.
spark = SparkSession.builder.appName("SimpleTextPipeline").getOrCreate()
# Prepare training documents from a list of (id, text, label) tuples.
training = spark.createDataFrame([
(0, "a b c d e spark", 1.0),
(1, "b d", 0.0),
(2, "spark f g h", 1.0),
(3, "hadoop mapreduce", 0.0)
], ["id", "text", "label"])
# Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10, regParam=0.001)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
# Fit the pipeline to training documents.
model = pipeline.fit(training)
# Prepare test documents, which are unlabeled (id, text) tuples.
test = spark.createDataFrame([
(4, "spark i j k"),
(5, "l m n"),
(6, "spark hadoop spark"),
(7, "apache hadoop")
], ["id", "text"])
# Make predictions on test documents and print columns of interest.
prediction = model.transform(test)
selected = prediction.select("id", "text", "probability", "prediction")
for row in selected.collect():
rid, text, prob, prediction = row
print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob), prediction))
| xiligey/xiligey.github.io | code/2.py | Python | apache-2.0 | 1,442 | 0.00208 |
# Copyright (c) 2012 OpenStack Foundation
# Copyright (c) 2015 Rushil Chugh
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Share-related Utilities and helpers."""
from manila.common import constants
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', use_default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend', 'pool',
or 'backend_name', default value is 'backend'
:param use_default_pool_name: This flag specifies what to do
if level == 'pool' and there is no 'pool' info
encoded in host string. default_pool_name=True
will return DEFAULT_POOL_NAME, otherwise it will
return None. Default value of this parameter
is False.
:return: expected level of information
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
ret = extract_host(host, 'backend_name')
# ret is 'BackendB'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if level == 'host':
# Make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
    elif level == 'backend_name':
hst = host.split('#')[0]
return hst.split('@')[1]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif use_default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
def get_active_replica(replica_list):
"""Returns the first 'active' replica in the list of replicas provided."""
for replica in replica_list:
if replica['replica_state'] == constants.REPLICA_STATE_ACTIVE:
return replica
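# Minimal usage sketch (illustration only), mirroring the docstring examples above:
# round-trip a pool name through append_host()/extract_host().
if __name__ == '__main__':
    full_host = append_host('HostA@BackendB', 'PoolC')
    assert full_host == 'HostA@BackendB#PoolC'
    assert extract_host(full_host, 'host') == 'HostA'
    assert extract_host(full_host, 'backend_name') == 'BackendB'
    assert extract_host(full_host, 'pool') == 'PoolC'
    assert extract_host('HostX@BackendY', 'pool', True) == DEFAULT_POOL_NAME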
| NetApp/manila | manila/share/utils.py | Python | apache-2.0 | 3,138 | 0 |
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import socket
import time
import traceback
import gc
from aqt.qt import *
import aqt
from anki import Collection
from anki.sync import Syncer, RemoteServer, FullSyncer, MediaSyncer, \
RemoteMediaServer
from anki.hooks import addHook, remHook
from aqt.utils import tooltip, askUserDialog, showWarning, showText, showInfo
# Sync manager
######################################################################
class SyncManager(QObject):
def __init__(self, mw, pm):
QObject.__init__(self, mw)
self.mw = mw
self.pm = pm
def sync(self):
if not self.pm.profile['syncKey']:
auth = self._getUserPass()
if not auth:
return
self.pm.profile['syncUser'] = auth[0]
self._sync(auth)
else:
self._sync()
def _sync(self, auth=None):
# to avoid gui widgets being garbage collected in the worker thread,
# run gc in advance
self._didFullUp = False
self._didError = False
gc.collect()
# create the thread, setup signals and start running
t = self.thread = SyncThread(
self.pm.collectionPath(), self.pm.profile['syncKey'],
auth=auth, media=self.pm.profile['syncMedia'])
t.event.connect(self.onEvent)
self.label = _("Connecting...")
self.mw.progress.start(immediate=True, label=self.label)
self.sentBytes = self.recvBytes = 0
self._updateLabel()
self.thread.start()
while not self.thread.isFinished():
self.mw.app.processEvents()
self.thread.wait(100)
self.mw.progress.finish()
if self.thread.syncMsg:
showText(self.thread.syncMsg)
if self.thread.uname:
self.pm.profile['syncUser'] = self.thread.uname
def delayedInfo():
if self._didFullUp and not self._didError:
showInfo(_("""\
Your collection was successfully uploaded to AnkiWeb.
If you use any other devices, please sync them now, and choose \
to download the collection you have just uploaded from this computer. \
After doing so, future reviews and added cards will be merged \
automatically."""))
self.mw.progress.timer(1000, delayedInfo, False)
def _updateLabel(self):
self.mw.progress.update(label="%s\n%s" % (
self.label,
_("%(a)dkB up, %(b)dkB down") % dict(
a=self.sentBytes // 1024,
b=self.recvBytes // 1024)))
def onEvent(self, evt, *args):
pu = self.mw.progress.update
if evt == "badAuth":
tooltip(
_("AnkiWeb ID or password was incorrect; please try again."),
parent=self.mw)
# blank the key so we prompt user again
self.pm.profile['syncKey'] = None
self.pm.save()
elif evt == "corrupt":
pass
elif evt == "newKey":
self.pm.profile['syncKey'] = args[0]
self.pm.save()
elif evt == "offline":
tooltip(_("Syncing failed; internet offline."))
elif evt == "upbad":
self._didFullUp = False
self._checkFailed()
elif evt == "sync":
m = None; t = args[0]
if t == "login":
m = _("Syncing...")
elif t == "upload":
self._didFullUp = True
m = _("Uploading to AnkiWeb...")
elif t == "download":
m = _("Downloading from AnkiWeb...")
elif t == "sanity":
m = _("Checking...")
elif t == "findMedia":
m = _("Syncing Media...")
elif t == "upgradeRequired":
showText(_("""\
Please visit AnkiWeb, upgrade your deck, then try again."""))
if m:
self.label = m
self._updateLabel()
elif evt == "syncMsg":
self.label = args[0]
self._updateLabel()
elif evt == "error":
self._didError = True
showText(_("Syncing failed:\n%s")%
self._rewriteError(args[0]))
elif evt == "clockOff":
self._clockOff()
elif evt == "checkFailed":
self._checkFailed()
elif evt == "mediaSanity":
showWarning(_("""\
A problem occurred while syncing media. Please use Tools>Check Media, then \
sync again to correct the issue."""))
elif evt == "noChanges":
pass
elif evt == "fullSync":
self._confirmFullSync()
elif evt == "send":
# posted events not guaranteed to arrive in order
self.sentBytes = max(self.sentBytes, int(args[0]))
self._updateLabel()
elif evt == "recv":
self.recvBytes = max(self.recvBytes, int(args[0]))
self._updateLabel()
def _rewriteError(self, err):
if "Errno 61" in err:
return _("""\
Couldn't connect to AnkiWeb. Please check your network connection \
and try again.""")
elif "timed out" in err or "10060" in err:
return _("""\
The connection to AnkiWeb timed out. Please check your network \
connection and try again.""")
elif "code: 500" in err:
return _("""\
AnkiWeb encountered an error. Please try again in a few minutes, and if \
the problem persists, please file a bug report.""")
elif "code: 501" in err:
return _("""\
Please upgrade to the latest version of Anki.""")
# 502 is technically due to the server restarting, but we reuse the
# error message
elif "code: 502" in err:
return _("AnkiWeb is under maintenance. Please try again in a few minutes.")
elif "code: 503" in err:
return _("""\
AnkiWeb is too busy at the moment. Please try again in a few minutes.""")
elif "code: 504" in err:
return _("504 gateway timeout error received. Please try temporarily disabling your antivirus.")
elif "code: 409" in err:
return _("Only one client can access AnkiWeb at a time. If a previous sync failed, please try again in a few minutes.")
elif "10061" in err or "10013" in err or "10053" in err:
return _(
"Antivirus or firewall software is preventing Anki from connecting to the internet.")
elif "10054" in err or "Broken pipe" in err:
return _("Connection timed out. Either your internet connection is experiencing problems, or you have a very large file in your media folder.")
elif "Unable to find the server" in err:
return _(
"Server not found. Either your connection is down, or antivirus/firewall "
"software is blocking Anki from connecting to the internet.")
elif "code: 407" in err:
return _("Proxy authentication required.")
elif "code: 413" in err:
return _("Your collection or a media file is too large to sync.")
elif "EOF occurred in violation of protocol" in err:
return _("Error establishing a secure connection. This is usually caused by antivirus, firewall or VPN software, or problems with your ISP.")
elif "certificate verify failed" in err:
return _("Error establishing a secure connection. This is usually caused by antivirus, firewall or VPN software, or problems with your ISP.")
return err
def _getUserPass(self):
d = QDialog(self.mw)
d.setWindowTitle("Anki")
d.setWindowModality(Qt.WindowModal)
vbox = QVBoxLayout()
l = QLabel(_("""\
<h1>Account Required</h1>
A free account is required to keep your collection synchronized. Please \
<a href="%s">sign up</a> for an account, then \
enter your details below.""") %
"https://ankiweb.net/account/login")
l.setOpenExternalLinks(True)
l.setWordWrap(True)
vbox.addWidget(l)
vbox.addSpacing(20)
g = QGridLayout()
l1 = QLabel(_("AnkiWeb ID:"))
g.addWidget(l1, 0, 0)
user = QLineEdit()
g.addWidget(user, 0, 1)
l2 = QLabel(_("Password:"))
g.addWidget(l2, 1, 0)
passwd = QLineEdit()
passwd.setEchoMode(QLineEdit.Password)
g.addWidget(passwd, 1, 1)
vbox.addLayout(g)
bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
bb.button(QDialogButtonBox.Ok).setAutoDefault(True)
bb.accepted.connect(d.accept)
bb.rejected.connect(d.reject)
vbox.addWidget(bb)
d.setLayout(vbox)
d.show()
accepted = d.exec_()
u = user.text()
p = passwd.text()
if not accepted or not u or not p:
return
return (u, p)
def _confirmFullSync(self):
diag = askUserDialog(_("""\
Your decks here and on AnkiWeb differ in such a way that they can't \
be merged together, so it's necessary to overwrite the decks on one \
side with the decks from the other.
If you choose download, Anki will download the collection from AnkiWeb, \
and any changes you have made on your computer since the last sync will \
be lost.
If you choose upload, Anki will upload your collection to AnkiWeb, and \
any changes you have made on AnkiWeb or your other devices since the \
last sync to this device will be lost.
After all devices are in sync, future reviews and added cards can be merged \
automatically."""),
[_("Upload to AnkiWeb"),
_("Download from AnkiWeb"),
_("Cancel")])
diag.setDefault(2)
ret = diag.run()
if ret == _("Upload to AnkiWeb"):
self.thread.fullSyncChoice = "upload"
elif ret == _("Download from AnkiWeb"):
self.thread.fullSyncChoice = "download"
else:
self.thread.fullSyncChoice = "cancel"
def _clockOff(self):
showWarning(_("""\
Syncing requires the clock on your computer to be set correctly. Please \
fix the clock and try again."""))
def _checkFailed(self):
showWarning(_("""\
Your collection is in an inconsistent state. Please run Tools>\
Check Database, then sync again."""))
def badUserPass(self):
aqt.preferences.Preferences(self, self.pm.profile).dialog.tabWidget.\
setCurrentIndex(1)
# Sync thread
######################################################################
class SyncThread(QThread):
event = pyqtSignal(str, str)
def __init__(self, path, hkey, auth=None, media=True):
QThread.__init__(self)
self.path = path
self.hkey = hkey
self.auth = auth
self.media = media
def run(self):
# init this first so an early crash doesn't cause an error
# in the main thread
self.syncMsg = ""
self.uname = ""
try:
self.col = Collection(self.path, log=True)
except:
self.fireEvent("corrupt")
return
self.server = RemoteServer(self.hkey)
self.client = Syncer(self.col, self.server)
self.sentTotal = 0
self.recvTotal = 0
# throttle updates; qt doesn't handle lots of posted events well
self.byteUpdate = time.time()
def syncEvent(type):
self.fireEvent("sync", type)
def syncMsg(msg):
self.fireEvent("syncMsg", msg)
def canPost():
if (time.time() - self.byteUpdate) > 0.1:
self.byteUpdate = time.time()
return True
def sendEvent(bytes):
self.sentTotal += bytes
if canPost():
self.fireEvent("send", str(self.sentTotal))
def recvEvent(bytes):
self.recvTotal += bytes
if canPost():
self.fireEvent("recv", str(self.recvTotal))
addHook("sync", syncEvent)
addHook("syncMsg", syncMsg)
addHook("httpSend", sendEvent)
addHook("httpRecv", recvEvent)
# run sync and catch any errors
try:
self._sync()
except:
err = traceback.format_exc()
self.fireEvent("error", err)
finally:
# don't bump mod time unless we explicitly save
self.col.close(save=False)
remHook("sync", syncEvent)
remHook("syncMsg", syncMsg)
remHook("httpSend", sendEvent)
remHook("httpRecv", recvEvent)
def _sync(self):
if self.auth:
# need to authenticate and obtain host key
self.hkey = self.server.hostKey(*self.auth)
if not self.hkey:
# provided details were invalid
return self.fireEvent("badAuth")
else:
# write new details and tell calling thread to save
self.fireEvent("newKey", self.hkey)
# run sync and check state
try:
ret = self.client.sync()
except Exception as e:
log = traceback.format_exc()
err = repr(str(e))
if ("Unable to find the server" in err or
"Errno 2" in err):
self.fireEvent("offline")
else:
if not err:
err = log
self.fireEvent("error", err)
return
if ret == "badAuth":
return self.fireEvent("badAuth")
elif ret == "clockOff":
return self.fireEvent("clockOff")
elif ret == "basicCheckFailed" or ret == "sanityCheckFailed":
return self.fireEvent("checkFailed")
# full sync?
if ret == "fullSync":
return self._fullSync()
# save and note success state
if ret == "noChanges":
self.fireEvent("noChanges")
elif ret == "success":
self.fireEvent("success")
elif ret == "serverAbort":
pass
else:
self.fireEvent("error", "Unknown sync return code.")
self.syncMsg = self.client.syncMsg
self.uname = self.client.uname
# then move on to media sync
self._syncMedia()
def _fullSync(self):
# if the local deck is empty, assume user is trying to download
if self.col.isEmpty():
f = "download"
else:
# tell the calling thread we need a decision on sync direction, and
# wait for a reply
self.fullSyncChoice = False
self.fireEvent("fullSync")
while not self.fullSyncChoice:
time.sleep(0.1)
f = self.fullSyncChoice
if f == "cancel":
return
self.client = FullSyncer(self.col, self.hkey, self.server.con)
if f == "upload":
if not self.client.upload():
self.fireEvent("upbad")
else:
self.client.download()
# reopen db and move on to media sync
self.col.reopen()
self._syncMedia()
def _syncMedia(self):
if not self.media:
return
self.server = RemoteMediaServer(self.col, self.hkey, self.server.con)
self.client = MediaSyncer(self.col, self.server)
ret = self.client.sync()
if ret == "noChanges":
self.fireEvent("noMediaChanges")
elif ret == "sanityCheckFailed":
self.fireEvent("mediaSanity")
else:
self.fireEvent("mediaSuccess")
def fireEvent(self, cmd, arg=""):
self.event.emit(cmd, arg)
# Monkey-patch httplib & httplib2 so we can get progress info
######################################################################
CHUNK_SIZE = 65536
import http.client, httplib2
from io import StringIO
from anki.hooks import runHook
print("fixme: _conn_request and _incrementalSend need updating for python3")
# sending in httplib
def _incrementalSend(self, data):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise http.client.NotConnected()
# if it's not a file object, make it one
if not hasattr(data, 'read'):
data = StringIO(data)
while 1:
block = data.read(CHUNK_SIZE)
if not block:
break
self.sock.sendall(block)
runHook("httpSend", len(block))
#http.client.HTTPConnection.send = _incrementalSend
# receiving in httplib2
# this is an augmented version of httplib's request routine that:
# - doesn't assume requests will be tried more than once
# - calls a hook for each chunk of data so we can update the gui
# - retries only when keep-alive connection is closed
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(2):
try:
if conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise httplib2.ServerNotFoundError(
"Unable to find the server at %s" % conn.host)
except httplib2.ssl_SSLError:
conn.close()
raise
except socket.error as e:
conn.close()
raise
except http.client.HTTPException:
conn.close()
raise
try:
response = conn.getresponse()
except http.client.BadStatusLine:
print("retry bad line")
conn.close()
conn.connect()
continue
except (socket.error, http.client.HTTPException):
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
buf = StringIO()
while 1:
data = response.read(CHUNK_SIZE)
if not data:
break
buf.write(data)
runHook("httpRecv", len(data))
content = buf.getvalue()
response = httplib2.Response(response)
if method != "HEAD":
content = httplib2._decompressContent(response, content)
return (response, content)
#httplib2.Http._conn_request = _conn_request
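# Illustrative sketch (not part of the original module): any other component can
# observe the same transfer progress by registering its own handler for the
# "httpSend"/"httpRecv" hooks that the patched routines above fire with a byte
# count per chunk. The reporting below (a simple print) is an arbitrary choice.
def _log_progress_example():
    sent = {'bytes': 0}
    def onSend(nbytes):
        sent['bytes'] += nbytes
        print("sent %dkB so far" % (sent['bytes'] // 1024))
    addHook("httpSend", onSend)
    try:
        pass  # ... trigger a sync here ...
    finally:
        remHook("httpSend", onSend)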
| Stvad/anki | aqt/sync.py | Python | agpl-3.0 | 18,494 | 0.00173 |
from pathlib import Path
from django.test import SimpleTestCase
TEST_ROOT_DIR = Path(__file__).parent
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': (
str(TEST_ROOT_DIR / "templates"),
),
'OPTIONS': {
'debug': True,
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"quickstartup.context_processors.project_infos",
"quickstartup.context_processors.project_settings",
),
},
},
]
def get_mail_payloads(message):
text = ""
html = ""
for payload in message.message().get_payload():
if payload.get_content_type() == "text/plain":
text = payload.as_string()
if payload.get_content_type() == "text/html":
html = payload.as_string()
return text, html
def check_form_error(response, form_name, field, errors, msg_prefix=''):
test_case = SimpleTestCase()
test_case.assertFormError(response, form_name, field, errors, msg_prefix)
return True
def check_redirects(response, expected_url):
test_case = SimpleTestCase()
test_case.assertRedirects(response, expected_url=expected_url)
return True
def check_template_used(response, template_name):
test_case = SimpleTestCase()
test_case.assertTemplateUsed(response, template_name=template_name)
return True
def check_contains(response, text):
test_case = SimpleTestCase()
test_case.assertContains(response, text=text)
return True
def check_in_html(needle, haystack):
test_case = SimpleTestCase()
test_case.assertInHTML(needle, haystack)
return True
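# Hypothetical usage sketch (illustration only): because each helper returns True
# after delegating the assertion to SimpleTestCase, it can be used inside a plain
# assert in a pytest-style test. The URL, template name and client fixture below
# are assumptions:
#
#   def test_contact_page(client):
#       response = client.get("/contact/")
#       assert check_template_used(response, "contact.html")
#       assert check_contains(response, "Contact")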
| osantana/quickstartup | tests/base.py | Python | mit | 2,174 | 0 |
from __future__ import print_function
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.regression import LabeledPoint
from pyspark.sql import SparkSession
from pyspark import SparkContext
import numpy as np
sc = SparkContext('local', 'logistic')
spark = SparkSession \
.builder \
.appName("Logistic regression w/ Spark ML") \
.getOrCreate()
BUCKET='BUCKET_NAME'
# read dataset
traindays = spark.read \
.option("header", "true") \
.csv('gs://{}/flights/trainday.csv'.format(BUCKET))
traindays.createOrReplaceTempView('traindays')
from pyspark.sql.types import StringType, FloatType, StructType, StructField
header = 'FL_DATE,UNIQUE_CARRIER,AIRLINE_ID,CARRIER,FL_NUM,ORIGIN_AIRPORT_ID,ORIGIN_AIRPORT_SEQ_ID,ORIGIN_CITY_MARKET_ID,ORIGIN,DEST_AIRPORT_ID,DEST_AIRPORT_SEQ_ID,DEST_CITY_MARKET_ID,DEST,CRS_DEP_TIME,DEP_TIME,DEP_DELAY,TAXI_OUT,WHEELS_OFF,WHEELS_ON,TAXI_IN,CRS_ARR_TIME,ARR_TIME,ARR_DELAY,CANCELLED,CANCELLATION_CODE,DIVERTED,DISTANCE,DEP_AIRPORT_LAT,DEP_AIRPORT_LON,DEP_AIRPORT_TZOFFSET,ARR_AIRPORT_LAT,ARR_AIRPORT_LON,ARR_AIRPORT_TZOFFSET,EVENT,NOTIFY_TIME'
def get_structfield(colname):
if colname in ['ARR_DELAY', 'DEP_DELAY', 'DISTANCE', 'TAXI_OUT', 'DEP_AIRPORT_TZOFFSET']:
return StructField(colname, FloatType(), True)
else:
return StructField(colname, StringType(), True)
schema = StructType([get_structfield(colname) for colname in header.split(',')])
#inputs = 'gs://{}/flights/tzcorr/all_flights-00000-*'.format(BUCKET) # 1/30th
inputs = 'gs://{}/flights/tzcorr/all_flights-*'.format(BUCKET) # FULL
flights = spark.read\
.schema(schema)\
.csv(inputs)
flights.createOrReplaceTempView('flights')
# separate training and validation data
from pyspark.sql.functions import rand
SEED=13
traindays = traindays.withColumn("holdout", rand(SEED) > 0.8) # 80% of data is for training
traindays.createOrReplaceTempView('traindays')
# logistic regression
trainquery = """
SELECT
DEP_DELAY, TAXI_OUT, ARR_DELAY, DISTANCE, DEP_TIME, DEP_AIRPORT_TZOFFSET
FROM flights f
JOIN traindays t
ON f.FL_DATE == t.FL_DATE
WHERE
t.is_train_day == 'True' AND
t.holdout == False AND
f.CANCELLED == '0.00' AND
f.DIVERTED == '0.00'
"""
traindata = spark.sql(trainquery).repartition(1000)
def get_category(hour):
if hour < 6 or hour > 20:
return [1, 0, 0] # night
if hour < 10:
return [0, 1, 0] # morning
if hour < 17:
return [0, 0, 1] # mid-day
else:
return [0, 0, 0] # evening
def get_local_hour(timestamp, correction):
import datetime
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
    timestamp = timestamp.replace('T', ' ')  # in case the separator is 'T' instead of a space
t = datetime.datetime.strptime(timestamp, TIME_FORMAT)
d = datetime.timedelta(seconds=correction)
t = t + d
#return t.hour # raw
#theta = np.radians(360 * t.hour / 24.0) # von-Miyes
#return [np.sin(theta), np.cos(theta)]
return get_category(t.hour) # bucketize
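# Worked example (sketch, with assumed values): a departure at '2015-01-01 08:00:00'
# UTC with DEP_AIRPORT_TZOFFSET == -28800 seconds (UTC-8) is 00:00 local time, so
# get_local_hour(...) returns get_category(0) == [1, 0, 0], i.e. the "night" bucket.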
def to_example(fields):
features = [ \
fields['DEP_DELAY'], \
fields['DISTANCE'], \
fields['TAXI_OUT'], \
]
features.extend(get_local_hour(fields['DEP_TIME'],
fields['DEP_AIRPORT_TZOFFSET']))
#features.extend(fields['origin_onehot'])
return LabeledPoint(\
float(fields['ARR_DELAY'] < 15), #ontime \
features)
index_model = 0
def add_categorical(df, train=False):
    from pyspark.ml.feature import OneHotEncoder, StringIndexer
    # Reuse the StringIndexer model fitted on the training data when encoding the
    # evaluation data, so both datasets share the same ORIGIN index mapping.
    global index_model
    if train:
        indexer = StringIndexer(inputCol='ORIGIN',
                                outputCol='origin_index')
        index_model = indexer.fit(df)
    indexed = index_model.transform(df)
    encoder = OneHotEncoder(inputCol='origin_index',
                            outputCol='origin_onehot')
    return encoder.transform(indexed)
#traindata = add_categorical(traindata, train=True)
examples = traindata.rdd.map(to_example)
lrmodel = LogisticRegressionWithLBFGS.train(examples, intercept=True)
lrmodel.setThreshold(0.7)
# save model
MODEL_FILE='gs://' + BUCKET + '/flights/sparkmloutput/model'
lrmodel.save(sc, MODEL_FILE)
# evaluate model on the heldout data
evalquery = trainquery.replace("t.holdout == False","t.holdout == True")
evaldata = spark.sql(evalquery).repartition(1000)
#evaldata = add_categorical(evaldata)
examples = evaldata.rdd.map(to_example)
labelpred = examples.map(lambda p: (p.label, lrmodel.predict(p.features)))
def eval(labelpred):
'''
data = (label, pred)
data[0] = label
data[1] = pred
'''
cancel = labelpred.filter(lambda data: data[1] < 0.7)
nocancel = labelpred.filter(lambda data: data[1] >= 0.7)
corr_cancel = cancel.filter(lambda data: data[0] == int(data[1] >= 0.7)).count()
corr_nocancel = nocancel.filter(lambda data: data[0] == int(data[1] >= 0.7)).count()
cancel_denom = cancel.count()
nocancel_denom = nocancel.count()
if cancel_denom == 0:
cancel_denom = 1
if nocancel_denom == 0:
nocancel_denom = 1
return {'total_cancel': cancel.count(), \
'correct_cancel': float(corr_cancel)/cancel_denom, \
'total_noncancel': nocancel.count(), \
'correct_noncancel': float(corr_nocancel)/nocancel_denom \
}
print(eval(labelpred))
| GoogleCloudPlatform/training-data-analyst | quests/data-science-on-gcp-edition1_tf2/07_sparkml_and_bqml/experiment.py | Python | apache-2.0 | 5,377 | 0.015622 |
import asyncio
from unittest import mock
import pytest
from multidict import CIMultiDict
from aiohttp.signals import Signal
from aiohttp.test_utils import make_mocked_request
from aiohttp.web import Application, Response
@pytest.fixture
def app():
return Application()
@pytest.fixture
def debug_app():
return Application(debug=True)
def make_request(app, method, path, headers=CIMultiDict()):
return make_mocked_request(method, path, headers, app=app)
async def test_add_signal_handler_not_a_callable(app):
callback = True
app.on_response_prepare.append(callback)
with pytest.raises(TypeError):
await app.on_response_prepare(None, None)
async def test_function_signal_dispatch(app):
signal = Signal(app)
kwargs = {'foo': 1, 'bar': 2}
callback_mock = mock.Mock()
@asyncio.coroutine
def callback(**kwargs):
callback_mock(**kwargs)
signal.append(callback)
await signal.send(**kwargs)
callback_mock.assert_called_once_with(**kwargs)
async def test_function_signal_dispatch2(app):
signal = Signal(app)
args = {'a', 'b'}
kwargs = {'foo': 1, 'bar': 2}
callback_mock = mock.Mock()
@asyncio.coroutine
def callback(*args, **kwargs):
callback_mock(*args, **kwargs)
signal.append(callback)
await signal.send(*args, **kwargs)
callback_mock.assert_called_once_with(*args, **kwargs)
async def test_response_prepare(app):
callback = mock.Mock()
@asyncio.coroutine
def cb(*args, **kwargs):
callback(*args, **kwargs)
app.on_response_prepare.append(cb)
request = make_request(app, 'GET', '/')
response = Response(body=b'')
await response.prepare(request)
callback.assert_called_once_with(request, response)
async def test_non_coroutine(app):
signal = Signal(app)
kwargs = {'foo': 1, 'bar': 2}
callback = mock.Mock()
signal.append(callback)
await signal.send(**kwargs)
callback.assert_called_once_with(**kwargs)
async def test_debug_signal(debug_app):
assert debug_app.debug, "Should be True"
signal = Signal(debug_app)
callback = mock.Mock()
pre = mock.Mock()
post = mock.Mock()
signal.append(callback)
debug_app.on_pre_signal.append(pre)
debug_app.on_post_signal.append(post)
await signal.send(1, a=2)
callback.assert_called_once_with(1, a=2)
pre.assert_called_once_with(1, 'aiohttp.signals:Signal', 1, a=2)
post.assert_called_once_with(1, 'aiohttp.signals:Signal', 1, a=2)
def test_setitem(app):
signal = Signal(app)
m1 = mock.Mock()
signal.append(m1)
assert signal[0] is m1
m2 = mock.Mock()
signal[0] = m2
assert signal[0] is m2
def test_delitem(app):
signal = Signal(app)
m1 = mock.Mock()
signal.append(m1)
assert len(signal) == 1
del signal[0]
assert len(signal) == 0
def test_cannot_append_to_frozen_signal(app):
signal = Signal(app)
m1 = mock.Mock()
m2 = mock.Mock()
signal.append(m1)
signal.freeze()
with pytest.raises(RuntimeError):
signal.append(m2)
assert list(signal) == [m1]
def test_cannot_setitem_in_frozen_signal(app):
signal = Signal(app)
m1 = mock.Mock()
m2 = mock.Mock()
signal.append(m1)
signal.freeze()
with pytest.raises(RuntimeError):
signal[0] = m2
assert list(signal) == [m1]
def test_cannot_delitem_in_frozen_signal(app):
signal = Signal(app)
m1 = mock.Mock()
signal.append(m1)
signal.freeze()
with pytest.raises(RuntimeError):
del signal[0]
assert list(signal) == [m1]
| playpauseandstop/aiohttp | tests/test_signals.py | Python | apache-2.0 | 3,612 | 0 |
#!/usr/bin/env python
'''
@author Luke C
@date Mon Mar 25 09:57:59 EDT 2013
@file ion/util/stored_values.py
'''
from pyon.core.exception import NotFound
import gevent
class StoredValueManager(object):
def __init__(self, container):
self.store = container.object_store
def stored_value_cas(self, doc_key, document_updates):
'''
        Performs a check-and-set for the given key: creates the document in the
        object store if it does not exist, otherwise merges the updates into it.
'''
try:
doc = self.store.read_doc(doc_key)
except NotFound:
doc_id, rev = self.store.create_doc(document_updates, object_id=doc_key)
return doc_id, rev
except KeyError as e:
if 'http' in e.message:
doc_id, rev = self.store.create_doc(document_updates, object_id=doc_key)
return doc_id, rev
for k,v in document_updates.iteritems():
doc[k] = v
doc_id, rev = self.store.update_doc(doc)
return doc_id, rev
def read_value(self, doc_key):
doc = self.store.read_doc(doc_key)
return doc
def read_value_mult(self, doc_keys, strict=False):
doc_list = self.store.read_doc_mult(doc_keys, strict=strict)
return doc_list
def delete_stored_value(self, doc_key):
self.store.delete_doc(doc_key)
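# Hypothetical usage sketch (illustration only; `container` is assumed to expose an
# object_store, as required by the constructor above):
#
#   svm = StoredValueManager(container)
#   svm.stored_value_cas('calibration_doc', {'slope': 1.2, 'offset': 0.4})
#   doc = svm.read_value('calibration_doc')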
| ooici/coi-services | ion/util/stored_values.py | Python | bsd-2-clause | 1,331 | 0.004508 |
import os
import subprocess
import logging
import re
from checks import utils
import sys
class RunSamtoolsCommands:
@classmethod
def _run_subprocess(cls, args_list):
proc = subprocess.run(args_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
utils.log_error(args_list, proc.stderr, proc.returncode)
if proc.stderr or proc.returncode != 0:
raise RuntimeError("ERROR running process: %s, error = %s and exit code = %s" % (args_list, proc.stderr, proc.returncode))
return proc.stdout
@classmethod
def run_samtools_quickcheck(cls, fpath):
return cls._run_subprocess(['samtools', 'quickcheck', '-v', fpath])
@classmethod
def get_samtools_flagstat_output(cls, fpath):
return cls._run_subprocess(['samtools', 'flagstat', fpath])
@classmethod
def get_samtools_stats_output(cls, fpath):
return cls._run_subprocess(['samtools', 'stats', fpath])
@classmethod
def get_samtools_version_output(cls):
return cls._run_subprocess(['samtools', '--version'])
class HandleSamtoolsStats:
@classmethod
def _get_stats(cls, stats_fpath):
if stats_fpath and os.path.isfile(stats_fpath):
return utils.read_from_file(stats_fpath)
return None
@classmethod
def _generate_stats(cls, data_fpath):
if not data_fpath or not os.path.isfile(data_fpath):
raise ValueError("Can't generate stats from a non-existing file: %s" % str(data_fpath))
return RunSamtoolsCommands.get_samtools_stats_output(data_fpath)
@classmethod
def _is_stats_file_older_than_data(cls, data_fpath, stats_fpath):
if utils.compare_mtimestamp(data_fpath, stats_fpath) >= 0:
return True
return False
@classmethod
def fetch_stats(cls, fpath, stats_fpath):
if not fpath or not os.path.isfile(fpath):
raise ValueError("You need to give a valid file path if you want the stats")
if os.path.isfile(stats_fpath) and not cls._is_stats_file_older_than_data(fpath, stats_fpath) and \
utils.can_read_file(stats_fpath):
stats = HandleSamtoolsStats._get_stats(stats_fpath)
logging.info("Reading stats from file %s" % stats_fpath)
else:
stats = HandleSamtoolsStats._generate_stats(fpath)
logging.info("Generating stats for file %s" % fpath)
if os.path.isfile(stats_fpath) and cls._is_stats_file_older_than_data(fpath, stats_fpath):
logging.warning("The stats file is older than the actual file, you need to remove/update it. "
"Regenerating the stats, but without saving.")
return stats
@classmethod
def persist_stats(cls, stats, stats_fpath):
if not stats or not stats_fpath:
raise ValueError("You must provide both stats and stats file path for saving the stats to a file."
" Received stats = %s and stats fpath = %s" % (str(stats), str(stats_fpath)))
if not os.path.isfile(stats_fpath):
logging.info("Persisting the stats to disk")
return utils.write_to_file(stats_fpath, stats)
else:
logging.info("Skipping persist_stats to disk, apparently there is a valid stats file there already.")
return False
@classmethod
def extract_seq_checksum_from_stats(cls, stats: str) -> str:
for line in stats.split('\n'):
if re.search('^CHK', line):
return line
return None
class HandleSamtoolsVersion:
@classmethod
def _get_version_nr_from_samtools_output(cls, output):
version_line = output.splitlines()[0]
tokens = version_line.split()
if len(tokens) < 2:
raise ValueError("samtools --version output looks different than expected. Can't parse it.")
return tokens[1]
@classmethod
def _extract_major_version_nr(cls, version):
return version.split('.', 1)[0]
@classmethod
def _extract_minor_version_nr(cls, version):
vers_tokens = re.split(r'[.-]', version, 1)
if len(vers_tokens) < 2:
raise ValueError("samtools version output looks different than expected.Can't parse it.")
min_vs = re.split(r'[.-]', vers_tokens[1], 1)[0]
return min_vs
@classmethod
def _check_major_version_nr(cls, major_vs_nr):
if not major_vs_nr.isdigit():
raise ValueError("samtools version output looks different than expected. Can't parse it.")
if int(major_vs_nr) < 1:
raise ValueError("You need to use at least samtools version 1.3.")
@classmethod
def _check_minor_version_nr(cls, minor_vs_nr):
if not minor_vs_nr.isdigit():
raise ValueError("samtools version output looks different than expected.Can't parse it.")
minor_nr_1 = minor_vs_nr.split('.', 1)[0]
if not minor_nr_1.isdigit():
raise ValueError("Can't parse samtools version string.")
        if int(minor_nr_1) < 3:  # compare the full minor number, so e.g. 1.10 is accepted
raise ValueError("You need to use at least samtools version 1.3.")
@classmethod
def check_samtools_version(cls, version_output):
if not version_output:
raise ValueError("samtools --version output is empty. You need to use at least samtools version 1.3.")
version = cls._get_version_nr_from_samtools_output(version_output)
major_nr = cls._extract_major_version_nr(version)
minor_nr = cls._extract_minor_version_nr(version)
cls._check_major_version_nr(major_nr)
cls._check_minor_version_nr(minor_nr)
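# Minimal usage sketch (illustration only; the version string is an assumed example
# of `samtools --version` output):
#
#   HandleSamtoolsVersion.check_samtools_version("samtools 1.3.1\nUsing htslib 1.3.1")
#   # raises ValueError for anything older than samtools 1.3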
class CompareStatsForFiles:
@classmethod
def compare_flagstats(cls, flagstat_b, flagstat_c):
errors = []
if not flagstat_c or not flagstat_b:
errors.append("At least one of the flagstats is missing")
return errors
if flagstat_b != flagstat_c:
logging.error("FLAGSTAT DIFFERENT:\n %s then:\n %s " % (flagstat_b, flagstat_c))
errors.append("FLAGSTAT DIFFERENT:\n %s then:\n %s " % (flagstat_b, flagstat_c))
else:
logging.info("Flagstats are equal.")
return errors
@classmethod
def compare_stats_by_sequence_checksum(cls, stats_b, stats_c):
errors = []
if not stats_b or not stats_c:
errors.append("You need to provide both BAM and CRAM stats for cmparison")
return errors
chk_b = HandleSamtoolsStats.extract_seq_checksum_from_stats(stats_b)
chk_c = HandleSamtoolsStats.extract_seq_checksum_from_stats(stats_c)
if not chk_b:
errors.append("For some reason there is no CHK line in the samtools stats")
logging.error("For some reason there is no CHK line in the samtools stats")
if not chk_c:
errors.append("For some reason there is no CHK line in the samtools stats")
logging.error("For some reason there is no CHK line in the samtools stats")
if chk_b != chk_c:
errors.append("STATS SEQUENCE CHECKSUM DIFFERENT: %s and %s" % (chk_b, chk_c))
logging.error("STATS SEQUENCE CHECKSUM DIFFERENT: %s and %s" % (chk_b, chk_c))
return errors
@classmethod
def compare_bam_and_cram_by_statistics(cls, bam_path, cram_path):
errors = []
# Check that it's a valid file path
if not bam_path or (not utils.is_irods_path(bam_path) and not os.path.isfile(bam_path)):
errors.append("The BAM file path: %s is not valid" % bam_path)
if not cram_path or (not utils.is_irods_path(cram_path) and not os.path.isfile(cram_path)):
errors.append("The CRAM file path:%s is not valid" % cram_path)
if errors:
logging.error("There are errors with the file paths you provided: %s" % errors)
return errors
        # Check that the files are readable by the current user
if not utils.is_irods_path(bam_path) and not utils.can_read_file(bam_path):
errors.append("Can't read file %s" % bam_path)
if not utils.is_irods_path(cram_path) and not utils.can_read_file(cram_path):
errors.append("Can't read file %s" % cram_path)
if errors:
logging.error("There are problems reading the files: %s" % errors)
return errors
# # Checking on samtools version:
# version_output = RunSamtoolsCommands.get_samtools_version_output()
# try:
# HandleSamtoolsVersion.check_samtools_version(version_output)
# except ValueError as e:
# errors.append(str(e))
# return errors
# Quickcheck the files before anything:
try:
RunSamtoolsCommands.run_samtools_quickcheck(bam_path)
except RuntimeError as e:
errors.append(str(e))
try:
RunSamtoolsCommands.run_samtools_quickcheck(cram_path)
except RuntimeError as e:
errors.append(str(e))
if errors:
logging.error("There are problems running quickcheck on the files you've given: %s" % errors)
return errors
# Calculate and compare flagstat:
try:
flagstat_b = RunSamtoolsCommands.get_samtools_flagstat_output(bam_path)
except RuntimeError as e:
errors.append(str(e))
try:
flagstat_c = RunSamtoolsCommands.get_samtools_flagstat_output(cram_path)
except RuntimeError as e:
errors.append(str(e))
if not errors:
errors.extend(cls.compare_flagstats(flagstat_b, flagstat_c))
else:
logging.error("THere are problems running flagstat on the files you've given: %s" % errors)
# Calculate and compare stats:
stats_fpath_b = bam_path + ".stats"
stats_fpath_c = cram_path + ".stats"
stats_b, stats_c = None, None
try:
stats_b = HandleSamtoolsStats.fetch_stats(bam_path, stats_fpath_b)
except (ValueError, RuntimeError) as e:
errors.append(str(e))
try:
stats_c = HandleSamtoolsStats.fetch_stats(cram_path, stats_fpath_c)
except (ValueError, RuntimeError) as e:
errors.append(str(e))
if not errors and stats_b and stats_c:
errors.extend(cls.compare_stats_by_sequence_checksum(stats_b, stats_c))
else:
errors.append("Can't compare samtools stats.")
logging.error("For some reason I can't compare samtools stats for your files.")
# Persist stats:
try:
if stats_b and not utils.is_irods_path(bam_path):
HandleSamtoolsStats.persist_stats(stats_b, stats_fpath_b)
except IOError as e:
errors.append("Can't save stats to disk for %s file" % bam_path)
logging.error("Can't save stats to disk for %s file" % bam_path)
try:
if stats_c and not utils.is_irods_path(cram_path):
HandleSamtoolsStats.persist_stats(stats_c, stats_fpath_c)
except IOError as e:
errors.append("Can't save stats to disk for %s file" % cram_path)
logging.error("Can't save stats to disk for %s file" % cram_path)
return errors
| wtsi-hgi/bam2cram-check | checks/stats_checks.py | Python | gpl-3.0 | 11,324 | 0.003621 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue as Queue
import time
from ansible.errors import *
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role import hash_params
from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader
from ansible.template import Templar
from ansible.utils.debug import debug
__all__ = ['StrategyBase']
# FIXME: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
'''
    A simple object to make passing the various plugin loaders to
    the forked processes over the queue easier
'''
def __init__(self):
self.basedirs = _basedirs[:]
self.filter_loader = filter_loader
self.lookup_loader = lookup_loader
self.module_loader = module_loader
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm.get_notified_handlers()
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
def run(self, iterator, connection_info, result=True):
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = self._tqm._failed_hosts.keys()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
debug("running handlers")
result &= self.run_handlers(iterator, connection_info)
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
# send the stats callback
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
if len(unreachable_hosts) > 0:
return 3
elif len(failed_hosts) > 0:
return 2
elif not result:
return 1
else:
return 0
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
new_vars = vars.copy()
new_vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
new_vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
return new_vars
def _queue_task(self, host, task, task_vars, connection_info):
''' handles queueing the task up to be sent to a worker '''
debug("entering _queue_task() for %s/%s" % (host, task))
# and then queue the new task
debug("%s - putting task (%s) in queue" % (host, task))
try:
debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
(worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
self._cur_worker += 1
if self._cur_worker >= len(self._workers):
self._cur_worker = 0
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj()
main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, shared_loader_obj), block=False)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
debug("got an error while queuing: %s" % e)
return
debug("exiting _queue_task() for %s/%s" % (host, task))
def _process_pending_results(self, iterator):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
while not self._final_q.empty() and not self._tqm._terminated:
try:
result = self._final_q.get(block=False)
debug("got result from result worker: %s" % (result,))
# all host status messages contain 2 entries: (msg, task_result)
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
task_result = result[1]
host = task_result._host
task = task_result._task
if result[0] == 'host_task_failed' or task_result.is_failed():
if not task.ignore_errors:
debug("marking %s as failed" % host.name)
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
else:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
elif result[0] == 'host_unreachable':
self._tqm._unreachable_hosts[host.name] = True
self._tqm._stats.increment('dark', host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif result[0] == 'host_task_skipped':
self._tqm._stats.increment('skipped', host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
elif result[0] == 'host_task_ok':
self._tqm._stats.increment('ok', host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', host.name)
self._tqm.send_callback('v2_runner_on_ok', task_result)
self._pending_results -= 1
if host.name in self._blocked_hosts:
del self._blocked_hosts[host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[task_result._task._role._role_name].iteritems():
hashed_entry = hash_params(task_result._task._role._role_params)
if entry == hashed_entry:
role_obj._had_task_run = True
ret_results.append(task_result)
elif result[0] == 'add_host':
task_result = result[1]
new_host_info = task_result.get('add_host', dict())
self._add_host(new_host_info)
elif result[0] == 'add_group':
task = result[1]
self._add_group(task, iterator)
elif result[0] == 'notify_handler':
host = result[1]
handler_name = result[2]
if handler_name not in self._notified_handlers:
self._notified_handlers[handler_name] = []
if host not in self._notified_handlers[handler_name]:
self._notified_handlers[handler_name].append(host)
elif result[0] == 'register_host_var':
# essentially the same as 'set_host_var' below, however we
# never follow the delegate_to value for registered vars
host = result[1]
var_name = result[2]
var_value = result[3]
self._variable_manager.set_host_variable(host, var_name, var_value)
elif result[0] in ('set_host_var', 'set_host_facts'):
host = result[1]
task = result[2]
item = result[3]
if task.delegate_to is not None:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
if item is not None:
task_vars['item'] = item
templar = Templar(loader=self._loader, variables=task_vars)
host_name = templar.template(task.delegate_to)
target_host = self._inventory.get_host(host_name)
if target_host is None:
target_host = Host(name=host_name)
else:
target_host = host
if result[0] == 'set_host_var':
var_name = result[4]
var_value = result[5]
self._variable_manager.set_host_variable(target_host, var_name, var_value)
elif result[0] == 'set_host_facts':
facts = result[4]
self._variable_manager.set_host_facts(target_host, facts)
else:
raise AnsibleError("unknown result message received: %s" % result[0])
except Queue.Empty:
pass
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
results = self._process_pending_results(iterator)
ret_results.extend(results)
time.sleep(0.01)
debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info):
'''
Helper function to add a new host to inventory based on a task result.
'''
host_name = host_info.get('host_name')
# Check if host in cache, add if not
if host_name in self._inventory._hosts_cache:
new_host = self._inventory._hosts_cache[host_name]
else:
new_host = Host(name=host_name)
self._inventory._hosts_cache[host_name] = new_host
allgroup = self._inventory.get_group('all')
allgroup.add_host(new_host)
# Set/update the vars for this host
# FIXME: probably should have a set vars method for the host?
new_vars = host_info.get('host_vars', dict())
new_host.vars.update(new_vars)
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if not self._inventory.get_group(group_name):
new_group = Group(group_name)
self._inventory.add_group(new_group)
new_group.vars = self._inventory.get_group_variables(group_name)
else:
new_group = self._inventory.get_group(group_name)
new_group.add_host(new_host)
# add this host to the group cache
if self._inventory._groups_list is not None:
if group_name in self._inventory._groups_list:
if new_host.name not in self._inventory._groups_list[group_name]:
self._inventory._groups_list[group_name].append(new_host.name)
# clear pattern caching completely since it's unpredictable what
# patterns may have referenced the group
# FIXME: is this still required?
self._inventory.clear_pattern_cache()
def _add_group(self, task, iterator):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
groups = {}
changed = False
for host in self._inventory.get_hosts():
original_task = iterator.get_original_task(host, task)
all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=original_task)
templar = Templar(loader=self._loader, variables=all_vars)
group_name = templar.template(original_task.args.get('key'))
if task.evaluate_conditional(templar=templar, all_vars=all_vars):
if group_name not in groups:
groups[group_name] = []
groups[group_name].append(host)
for group_name, hosts in groups.iteritems():
new_group = self._inventory.get_group(group_name)
if not new_group:
# create the new group and add it to inventory
new_group = Group(name=group_name)
self._inventory.add_group(new_group)
# and add the group to the proper hierarchy
allgroup = self._inventory.get_group('all')
allgroup.add_child_group(new_group)
changed = True
for host in hosts:
if group_name not in host.get_groups():
new_group.add_host(host)
changed = True
return changed
def _load_included_file(self, included_file, iterator):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
        except AnsibleError as e:
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
if not isinstance(data, list):
raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds)
is_handler = isinstance(included_file._task, Handler)
block_list = load_list_of_blocks(
data,
play=included_file._task._block._play,
parent_block=included_file._task._block,
task_include=included_file._task,
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader
)
# set the vars for this task from those specified as params to the include
for b in block_list:
b.vars = included_file._args.copy()
return block_list
def run_handlers(self, iterator, connection_info):
'''
Runs handlers on those hosts which have been notified.
'''
result = True
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
handler_name = handler.get_name()
if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
# FIXME: need to use iterator.get_failed_hosts() instead?
#if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
for host in self._notified_handlers[handler_name]:
if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or connection_info.force_handlers):
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
self._queue_task(host, handler, task_vars, connection_info)
handler.flag_for_host(host)
self._process_pending_results(iterator)
self._wait_on_pending_results(iterator)
# wipe the notification list
self._notified_handlers[handler_name] = []
debug("done running handlers, result is: %s" % result)
return result
| datsfosure/ansible | lib/ansible/plugins/strategies/__init__.py | Python | gpl-3.0 | 19,457 | 0.003443 |
"""
Created on 2013-1-19
@author: Administrator
"""
import doctest
def average(values):
"""Computes the arithmetic mean of a list of numbers.
>>> print(average([20, 30, 70]))
40.0
"""
return sum(values) / len(values)
doctest.testmod() # automatically validate the embedded tests
| quchunguang/test | testpy3/testdoctest.py | Python | mit | 305 | 0.003279 |
import datetime
from mock import patch
from pretend import stub
from gurtel import session
def test_annotates_request():
"""Annotates request with ``session`` property."""
request = stub(
cookies={},
app=stub(secret_key='secret', is_ssl=True, config={}),
)
session.session_middleware(request, lambda req: None)
assert request.session.secret_key == 'secret'
@patch.object(session.JSONSecureCookie, 'save_cookie')
def test_sets_cookie_on_response(mock_save_cookie):
"""Calls ``save_cookie`` on response."""
request = stub(
cookies={},
app=stub(secret_key='secret', is_ssl=True, config={}),
)
response = stub()
session.session_middleware(request, lambda req: response)
mock_save_cookie.assert_called_once_with(
response, httponly=True, secure=True)
@patch.object(session.JSONSecureCookie, 'save_cookie')
@patch.object(session.timezone, 'now')
def test_can_set_expiry(mock_now, mock_save_cookie):
"""Calls ``save_cookie`` on response with expiry date, if configured."""
request = stub(
cookies={},
app=stub(
secret_key='secret',
is_ssl=True,
config={'session.expiry_minutes': '1440'},
),
)
response = stub()
mock_now.return_value = datetime.datetime(2013, 11, 22)
session.session_middleware(request, lambda req: response)
mock_save_cookie.assert_called_once_with(
response,
httponly=True,
secure=True,
expires=datetime.datetime(2013, 11, 23),
)
| oddbird/gurtel | tests/test_session.py | Python | bsd-3-clause | 1,572 | 0 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with the Amazon Transcribe API to
transcribe an audio file to a text file. Also shows how to define a custom vocabulary
to improve the accuracy of the transcription.
This example uses a public domain audio file downloaded from Wikipedia and converted
from .ogg to .mp3 format. The file contains a reading of the poem Jabberwocky by
Lewis Carroll. The original audio source file can be found here:
https://en.wikisource.org/wiki/File:Jabberwocky.ogg
"""
import logging
import sys
import time
import boto3
from botocore.exceptions import ClientError
import requests
# Add relative path to include demo_tools in this code example without need for setup.
sys.path.append('../..')
from demo_tools.custom_waiter import CustomWaiter, WaitState
logger = logging.getLogger(__name__)
class TranscribeCompleteWaiter(CustomWaiter):
"""
Waits for the transcription to complete.
"""
def __init__(self, client):
super().__init__(
'TranscribeComplete', 'GetTranscriptionJob',
'TranscriptionJob.TranscriptionJobStatus',
{'COMPLETED': WaitState.SUCCESS, 'FAILED': WaitState.FAILURE},
client)
def wait(self, job_name):
self._wait(TranscriptionJobName=job_name)
class VocabularyReadyWaiter(CustomWaiter):
"""
Waits for the custom vocabulary to be ready for use.
"""
def __init__(self, client):
super().__init__(
'VocabularyReady', 'GetVocabulary', 'VocabularyState',
{'READY': WaitState.SUCCESS}, client)
def wait(self, vocabulary_name):
self._wait(VocabularyName=vocabulary_name)
# snippet-start:[python.example_code.transcribe.StartTranscriptionJob]
def start_job(
job_name, media_uri, media_format, language_code, transcribe_client,
vocabulary_name=None):
"""
Starts a transcription job. This function returns as soon as the job is started.
To get the current status of the job, call get_transcription_job. The job is
successfully completed when the job status is 'COMPLETED'.
:param job_name: The name of the transcription job. This must be unique for
your AWS account.
:param media_uri: The URI where the audio file is stored. This is typically
in an Amazon S3 bucket.
:param media_format: The format of the audio file. For example, mp3 or wav.
:param language_code: The language code of the audio file.
For example, en-US or ja-JP
:param transcribe_client: The Boto3 Transcribe client.
:param vocabulary_name: The name of a custom vocabulary to use when transcribing
the audio file.
:return: Data about the job.
"""
try:
job_args = {
'TranscriptionJobName': job_name,
'Media': {'MediaFileUri': media_uri},
'MediaFormat': media_format,
'LanguageCode': language_code}
if vocabulary_name is not None:
job_args['Settings'] = {'VocabularyName': vocabulary_name}
response = transcribe_client.start_transcription_job(**job_args)
job = response['TranscriptionJob']
logger.info("Started transcription job %s.", job_name)
except ClientError:
logger.exception("Couldn't start transcription job %s.", job_name)
raise
else:
return job
# snippet-end:[python.example_code.transcribe.StartTranscriptionJob]
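# Illustrative sketch (not part of the original example): start a job and
# block on the waiter defined above until it finishes. The bucket and object
# key are hypothetical placeholders.
def start_and_wait_example(transcribe_client):
    """Sketch: start a transcription job and wait for it to complete."""
    job_name = f'example-job-{time.time_ns()}'
    start_job(
        job_name, 's3://DOC-EXAMPLE-BUCKET/example.mp3', 'mp3', 'en-US',
        transcribe_client)
    TranscribeCompleteWaiter(transcribe_client).wait(job_name)
    return get_job(job_name, transcribe_client)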
# snippet-start:[python.example_code.transcribe.ListTranscriptionJobs]
def list_jobs(job_filter, transcribe_client):
"""
Lists summaries of the transcription jobs for the current AWS account.
:param job_filter: The list of returned jobs must contain this string in their
names.
:param transcribe_client: The Boto3 Transcribe client.
:return: The list of retrieved transcription job summaries.
"""
try:
response = transcribe_client.list_transcription_jobs(
JobNameContains=job_filter)
jobs = response['TranscriptionJobSummaries']
next_token = response.get('NextToken')
while next_token is not None:
response = transcribe_client.list_transcription_jobs(
JobNameContains=job_filter, NextToken=next_token)
jobs += response['TranscriptionJobSummaries']
next_token = response.get('NextToken')
logger.info("Got %s jobs with filter %s.", len(jobs), job_filter)
except ClientError:
logger.exception("Couldn't get jobs with filter %s.", job_filter)
raise
else:
return jobs
# snippet-end:[python.example_code.transcribe.ListTranscriptionJobs]
# snippet-start:[python.example_code.transcribe.GetTranscriptionJob]
def get_job(job_name, transcribe_client):
"""
Gets details about a transcription job.
:param job_name: The name of the job to retrieve.
:param transcribe_client: The Boto3 Transcribe client.
:return: The retrieved transcription job.
"""
try:
response = transcribe_client.get_transcription_job(
TranscriptionJobName=job_name)
job = response['TranscriptionJob']
logger.info("Got job %s.", job['TranscriptionJobName'])
except ClientError:
logger.exception("Couldn't get job %s.", job_name)
raise
else:
return job
# snippet-end:[python.example_code.transcribe.GetTranscriptionJob]
# snippet-start:[python.example_code.transcribe.DeleteTranscriptionJob]
def delete_job(job_name, transcribe_client):
"""
Deletes a transcription job. This also deletes the transcript associated with
the job.
:param job_name: The name of the job to delete.
:param transcribe_client: The Boto3 Transcribe client.
"""
try:
transcribe_client.delete_transcription_job(
TranscriptionJobName=job_name)
logger.info("Deleted job %s.", job_name)
except ClientError:
logger.exception("Couldn't delete job %s.", job_name)
raise
# snippet-end:[python.example_code.transcribe.DeleteTranscriptionJob]
# snippet-start:[python.example_code.transcribe.CreateVocabulary]
def create_vocabulary(
vocabulary_name, language_code, transcribe_client,
phrases=None, table_uri=None):
"""
Creates a custom vocabulary that can be used to improve the accuracy of
transcription jobs. This function returns as soon as the vocabulary processing
is started. Call get_vocabulary to get the current status of the vocabulary.
The vocabulary is ready to use when its status is 'READY'.
:param vocabulary_name: The name of the custom vocabulary.
:param language_code: The language code of the vocabulary.
For example, en-US or nl-NL.
:param transcribe_client: The Boto3 Transcribe client.
    :param phrases: A list of phrases to include in the vocabulary.
:param table_uri: A table of phrases and pronunciation hints to include in the
vocabulary.
:return: Information about the newly created vocabulary.
"""
try:
vocab_args = {'VocabularyName': vocabulary_name, 'LanguageCode': language_code}
if phrases is not None:
vocab_args['Phrases'] = phrases
elif table_uri is not None:
vocab_args['VocabularyFileUri'] = table_uri
response = transcribe_client.create_vocabulary(**vocab_args)
logger.info("Created custom vocabulary %s.", response['VocabularyName'])
except ClientError:
logger.exception("Couldn't create custom vocabulary %s.", vocabulary_name)
raise
else:
return response
# snippet-end:[python.example_code.transcribe.CreateVocabulary]
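# Illustrative sketch (not part of the original example): create a small
# phrase-list vocabulary and block until it is ready to use. The vocabulary
# name and phrases are hypothetical.
def create_vocabulary_example(transcribe_client):
    """Sketch: create a custom vocabulary and wait until its state is READY."""
    vocabulary_name = f'example-vocabulary-{time.time_ns()}'
    create_vocabulary(
        vocabulary_name, 'en-US', transcribe_client,
        phrases=['brillig', 'slithy'])
    VocabularyReadyWaiter(transcribe_client).wait(vocabulary_name)
    return get_vocabulary(vocabulary_name, transcribe_client)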
# snippet-start:[python.example_code.transcribe.ListVocabularies]
def list_vocabularies(vocabulary_filter, transcribe_client):
"""
Lists the custom vocabularies created for this AWS account.
:param vocabulary_filter: The returned vocabularies must contain this string in
their names.
:param transcribe_client: The Boto3 Transcribe client.
:return: The list of retrieved vocabularies.
"""
try:
response = transcribe_client.list_vocabularies(
NameContains=vocabulary_filter)
vocabs = response['Vocabularies']
next_token = response.get('NextToken')
while next_token is not None:
response = transcribe_client.list_vocabularies(
NameContains=vocabulary_filter, NextToken=next_token)
vocabs += response['Vocabularies']
next_token = response.get('NextToken')
logger.info(
"Got %s vocabularies with filter %s.", len(vocabs), vocabulary_filter)
except ClientError:
logger.exception(
"Couldn't list vocabularies with filter %s.", vocabulary_filter)
raise
else:
return vocabs
# snippet-end:[python.example_code.transcribe.ListVocabularies]
# snippet-start:[python.example_code.transcribe.GetVocabulary]
def get_vocabulary(vocabulary_name, transcribe_client):
"""
    Gets information about a custom vocabulary.
:param vocabulary_name: The name of the vocabulary to retrieve.
:param transcribe_client: The Boto3 Transcribe client.
:return: Information about the vocabulary.
"""
try:
response = transcribe_client.get_vocabulary(VocabularyName=vocabulary_name)
logger.info("Got vocabulary %s.", response['VocabularyName'])
except ClientError:
logger.exception("Couldn't get vocabulary %s.", vocabulary_name)
raise
else:
return response
# snippet-end:[python.example_code.transcribe.GetVocabulary]
# snippet-start:[python.example_code.transcribe.UpdateVocabulary]
def update_vocabulary(
vocabulary_name, language_code, transcribe_client, phrases=None,
table_uri=None):
"""
Updates an existing custom vocabulary. The entire vocabulary is replaced with
the contents of the update.
:param vocabulary_name: The name of the vocabulary to update.
:param language_code: The language code of the vocabulary.
:param transcribe_client: The Boto3 Transcribe client.
    :param phrases: A list of phrases to include in the vocabulary.
:param table_uri: A table of phrases and pronunciation hints to include in the
vocabulary.
"""
try:
vocab_args = {'VocabularyName': vocabulary_name, 'LanguageCode': language_code}
if phrases is not None:
vocab_args['Phrases'] = phrases
elif table_uri is not None:
vocab_args['VocabularyFileUri'] = table_uri
response = transcribe_client.update_vocabulary(**vocab_args)
logger.info(
"Updated custom vocabulary %s.", response['VocabularyName'])
except ClientError:
logger.exception("Couldn't update custom vocabulary %s.", vocabulary_name)
raise
# snippet-end:[python.example_code.transcribe.UpdateVocabulary]
# snippet-start:[python.example_code.transcribe.DeleteVocabulary]
def delete_vocabulary(vocabulary_name, transcribe_client):
"""
Deletes a custom vocabulary.
:param vocabulary_name: The name of the vocabulary to delete.
:param transcribe_client: The Boto3 Transcribe client.
"""
try:
transcribe_client.delete_vocabulary(VocabularyName=vocabulary_name)
logger.info("Deleted vocabulary %s.", vocabulary_name)
except ClientError:
logger.exception("Couldn't delete vocabulary %s.", vocabulary_name)
raise
# snippet-end:[python.example_code.transcribe.DeleteVocabulary]
# snippet-start:[python.example_code.transcribe.Scenario_CustomVocabulary]
def usage_demo():
"""Shows how to use the Amazon Transcribe service."""
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
s3_resource = boto3.resource('s3')
transcribe_client = boto3.client('transcribe')
print('-'*88)
print("Welcome to the Amazon Transcribe demo!")
print('-'*88)
bucket_name = f'jabber-bucket-{time.time_ns()}'
print(f"Creating bucket {bucket_name}.")
bucket = s3_resource.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': transcribe_client.meta.region_name})
media_file_name = '.media/Jabberwocky.mp3'
media_object_key = 'Jabberwocky.mp3'
print(f"Uploading media file {media_file_name}.")
bucket.upload_file(media_file_name, media_object_key)
media_uri = f's3://{bucket.name}/{media_object_key}'
job_name_simple = f'Jabber-{time.time_ns()}'
print(f"Starting transcription job {job_name_simple}.")
start_job(
job_name_simple, f's3://{bucket_name}/{media_object_key}', 'mp3', 'en-US',
transcribe_client)
transcribe_waiter = TranscribeCompleteWaiter(transcribe_client)
transcribe_waiter.wait(job_name_simple)
job_simple = get_job(job_name_simple, transcribe_client)
transcript_simple = requests.get(
job_simple['Transcript']['TranscriptFileUri']).json()
print(f"Transcript for job {transcript_simple['jobName']}:")
print(transcript_simple['results']['transcripts'][0]['transcript'])
print('-'*88)
print("Creating a custom vocabulary that lists the nonsense words to try to "
"improve the transcription.")
vocabulary_name = f'Jabber-vocabulary-{time.time_ns()}'
create_vocabulary(
vocabulary_name, 'en-US', transcribe_client,
phrases=[
'brillig', 'slithy', 'borogoves', 'mome', 'raths', 'Jub-Jub', 'frumious',
'manxome', 'Tumtum', 'uffish', 'whiffling', 'tulgey', 'thou', 'frabjous',
'callooh', 'callay', 'chortled'],
)
vocabulary_ready_waiter = VocabularyReadyWaiter(transcribe_client)
vocabulary_ready_waiter.wait(vocabulary_name)
job_name_vocabulary_list = f'Jabber-vocabulary-list-{time.time_ns()}'
print(f"Starting transcription job {job_name_vocabulary_list}.")
start_job(
job_name_vocabulary_list, media_uri, 'mp3', 'en-US', transcribe_client,
vocabulary_name)
transcribe_waiter.wait(job_name_vocabulary_list)
job_vocabulary_list = get_job(job_name_vocabulary_list, transcribe_client)
transcript_vocabulary_list = requests.get(
job_vocabulary_list['Transcript']['TranscriptFileUri']).json()
print(f"Transcript for job {transcript_vocabulary_list['jobName']}:")
print(transcript_vocabulary_list['results']['transcripts'][0]['transcript'])
print('-'*88)
print("Updating the custom vocabulary with table data that provides additional "
"pronunciation hints.")
table_vocab_file = 'jabber-vocabulary-table.txt'
bucket.upload_file(table_vocab_file, table_vocab_file)
update_vocabulary(
vocabulary_name, 'en-US', transcribe_client,
table_uri=f's3://{bucket.name}/{table_vocab_file}')
vocabulary_ready_waiter.wait(vocabulary_name)
job_name_vocab_table = f'Jabber-vocab-table-{time.time_ns()}'
print(f"Starting transcription job {job_name_vocab_table}.")
start_job(
job_name_vocab_table, media_uri, 'mp3', 'en-US', transcribe_client,
vocabulary_name=vocabulary_name)
transcribe_waiter.wait(job_name_vocab_table)
job_vocab_table = get_job(job_name_vocab_table, transcribe_client)
transcript_vocab_table = requests.get(
job_vocab_table['Transcript']['TranscriptFileUri']).json()
print(f"Transcript for job {transcript_vocab_table['jobName']}:")
print(transcript_vocab_table['results']['transcripts'][0]['transcript'])
print('-'*88)
print("Getting data for jobs and vocabularies.")
jabber_jobs = list_jobs('Jabber', transcribe_client)
print(f"Found {len(jabber_jobs)} jobs:")
for job_sum in jabber_jobs:
job = get_job(job_sum['TranscriptionJobName'], transcribe_client)
print(f"\t{job['TranscriptionJobName']}, {job['Media']['MediaFileUri']}, "
f"{job['Settings'].get('VocabularyName')}")
jabber_vocabs = list_vocabularies('Jabber', transcribe_client)
print(f"Found {len(jabber_vocabs)} vocabularies:")
for vocab_sum in jabber_vocabs:
vocab = get_vocabulary(vocab_sum['VocabularyName'], transcribe_client)
vocab_content = requests.get(vocab['DownloadUri']).text
print(f"\t{vocab['VocabularyName']} contents:")
print(vocab_content)
print('-'*88)
print("Deleting demo jobs.")
for job_name in [job_name_simple, job_name_vocabulary_list, job_name_vocab_table]:
delete_job(job_name, transcribe_client)
print("Deleting demo vocabulary.")
delete_vocabulary(vocabulary_name, transcribe_client)
print("Deleting demo bucket.")
bucket.objects.delete()
bucket.delete()
print("Thanks for watching!")
# snippet-end:[python.example_code.transcribe.Scenario_CustomVocabulary]
if __name__ == '__main__':
usage_demo()
| awsdocs/aws-doc-sdk-examples | python/example_code/transcribe/transcribe_basics.py | Python | apache-2.0 | 17,119 | 0.002045 |
import dryscrape
# Start a virtual X server so the headless WebKit browser can run.
dryscrape.start_xvfb()
# Browse the Mango shop; skip image loading to speed up scraping.
sess = dryscrape.Session(base_url='http://shop.mango.com')
sess.set_attribute('auto_load_images', False)
sess.visit('/ES/m/hombre/prendas/todas/?m=coleccion')
# Dump the children of the document root, then the first price node found.
print sess.at_xpath("//*").children()
print "--------------------------"
print sess.at_xpath("//*[contains(@class,\"searchResultPrice\")]/text()")
# at_xpath() returns only the first match; see the sketch below for a loop
# over every price node.
#for price in sess.at_xpath("//*[contains(@class,\"searchResultPrice\")]"):
#    print price
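# Hypothetical sketch (not part of the original script): iterate over every
# price node instead of only the first match; assumes dryscrape's xpath()
# returns a list of nodes exposing text().
def print_all_prices(session):
    for node in session.xpath("//*[contains(@class,'searchResultPrice')]"):
        print node.text()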
| carlosb1/examples-python | ideas/mango-example/mango.py | Python | gpl-2.0 | 444 | 0.009009 |
import os
import re
from amlib import conf, utils, log
'''
Functions for parsing AD automount maps into a common dict format.
Part of ampush. https://github.com/sfu-rcg/ampush
Copyright (C) 2016 Research Computing Group, Simon Fraser University.
'''
# ff = flat file automount map
def get_names():
'''
Return a list of files in ${conf/flat_file_map_dir} with the master map and
(optional) direct map first.
'''
l_names, fs_map_names = [], []
for root, dirs, filenames in os.walk(conf.c['flat_file_map_dir']):
for map_name in filenames:
fs_map_names.append(map_name)
# ensure the master map and direct map (if one exists) are processed first
l_names.append(conf.c['master_map_name'])
try:
fs_map_names.remove(conf.c['master_map_name'])
except ValueError:
log_msg = '{0} does not exist on the filesystem. Terminating.'
log_msg = log_msg.format(conf.c['master_map_name'])
log.m.critical(log_msg)
print(log_msg)
exit(6)
if conf.c['direct_map_name'] in fs_map_names:
l_names.append(conf.c['direct_map_name'])
fs_map_names.remove(conf.c['direct_map_name'])
fs_map_names.sort()
for map_name in fs_map_names:
if re.match(r'^auto\.', map_name):
l_names.append(map_name)
return l_names
def detect_orphans():
'''
Return a list of maps that exist on the filesystem but are not mentioned
in auto.master.
'''
master_entries = parse(conf.c['master_map_name'])
master_mapnames = []
l_orphans = []
for k, v in master_entries.items():
master_mapnames.append(v['map'])
for ff_mapname in get_names():
# auto.master should not be listed in auto.master
if (ff_mapname not in master_mapnames and
ff_mapname != 'auto.master'):
l_orphans.append(ff_mapname)
if len(l_orphans) > 0:
l_orphans.sort()
log_msg = 'Found unused maps listed in {0}: {1}'
log_msg = log_msg.format(conf.c['master_map_name'],
' '.join(l_orphans))
log.m.warning(log_msg)
print(log_msg)
return
def parse_master(map_lines=None, map_name=None):
'''
Ingest master map as a list of strings. Return a nice dict like this:
{'/-': {'map': 'auto.direct', 'options': '-rw,intr,soft,bg'},
'/foo': {'map': 'auto.foo', 'options': '-rw,intr,soft,bg'},
'/bar': {'map': 'auto.bar', 'options': '-rw,intr,soft,bg'},
'/baz': {'map': 'auto.baz',
'options': '-ro,int,soft,bg,fstype=nfs4,port=2049'},}
'''
d_map = {}
for l in map_lines:
chunks = l.split()
am_key = chunks[0]
joined = ' '.join(chunks)
d_map[am_key] = {}
'''
As with submaps the mount options field is optional.
2 fields == automount entry without mount options.
'''
if len(chunks) == 2:
d_map[am_key] = {'map': chunks[1]}
log_msg = 'No mount options for {0} in {1}'
log_msg = log_msg.format(am_key, conf.c['master_map_name'])
log.m.info(log_msg)
# 3 fields? automount directory + mapname + mount options
elif len(chunks) == 3:
d_map[am_key] = {'map': chunks[1],
'options': chunks[2]}
else:
log_msg = (
'Terminating. Bad flat file master map format: '
'unexpected number of fields in ' + joined
)
log.m.critical(log_msg)
print(log_msg)
exit(11)
return d_map
def parse_submap(map_name=None, map_lines=None):
'''
Ingest a list of automount map entries. Return a nice dict like this:
{'yuv': {'options': '-intr,bg,tcp,vers=4',
'server_dir': '/yuv',
'server_hostname': 'nfssrv01.example.com'},
'luma': {'options': '-nosuid,tcp,intr,bg,vers=3,rw',
'server_dir': '/exports/luma',
'server_hostname': 'nfssrv02.example.com'}, ...}
'''
d_map = {}
log_msg = 'Reading {0}/{1}'.format(conf.c['flat_file_map_dir'],
map_name)
log.m.debug(log_msg)
for l in map_lines:
chunks = l.split()
am_key = chunks[0] # automount key
utils.validate_nis_map_entry(in_list=chunks[1:],
map_name=map_name,
am_key=am_key,
map_type='flat file')
d_map[am_key] = {}
'''
Consider these two valid automount entries:
apps -tcp,vers=3 nfs-server1.example.com:/exports/apps
data nfs-server2.example.com:/srv/data
If a third field exists, use it as the NFS path.
Otherwise use the second field as the NFS path.
'''
try: # server:path pair with options
server_hostname = chunks[2].split(':')[0]
server_dir = chunks[2].split(':')[1]
options = chunks[1]
utils.validate_mount_options(opt_str=options,
map_name=map_name,
am_key=am_key)
d_map[am_key] = {'server_hostname': server_hostname,
'server_dir': server_dir,
'options': options}
except IndexError: # without options
server_hostname = chunks[1].split(':')[0]
server_dir = chunks[1].split(':')[1]
d_map[am_key] = {'server_hostname': server_hostname,
'server_dir': server_dir,
'options': None}
return d_map
def parse(map_name=None):
'''
Read flat file automount maps ${ampush.conf/flat_file_map_dir} and
pass map names to parser_master_map or parse_submap.
'''
map_pathname = conf.c['flat_file_map_dir'] + '/' + map_name
map_lines = utils.ff_map_to_list(map_pathname)
map_type = 'flat file'
# different map types (master, direct, plain) == different sanity checks
if map_name == conf.c['master_map_name']:
d_map = parse_master(map_name=map_name,
map_lines=map_lines)
utils.master_map_sanity_checks(map_dict=d_map,
map_type=map_type)
else:
d_map = parse_submap(map_name=map_name,
map_lines=map_lines)
utils.submap_sanity_checks(map_dict=d_map,
map_type=map_type)
return d_map
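# Illustrative sketch (not part of the original module): parse every flat file
# map, starting with the master (and optional direct) map as get_names()
# guarantees.
def parse_all_example():
    """Sketch: return {map_name: parsed_dict} for every flat file map."""
    return {map_name: parse(map_name=map_name) for map_name in get_names()}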
| sfu-rcg/ampush | amlib/file_map.py | Python | mit | 6,519 | 0.00046 |
# Copyright (c) 2013 Jendrik Poloczek
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Aborilov Pavel
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014-2015 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -*- coding: utf-8 -*-
from . import base
from .generic_poll_text import GenPollUrl
import locale
class BitcoinTicker(GenPollUrl):
'''
A bitcoin ticker widget, data provided by the btc-e.com API. Defaults
to displaying currency in whatever the current locale is.
'''
QUERY_URL = "https://btc-e.com/api/2/btc_%s/ticker"
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('currency', locale.localeconv()['int_curr_symbol'].strip(),
'The currency the value of bitcoin is displayed in'),
('format', 'BTC Buy: {buy}, Sell: {sell}',
'Display format, allows buy, sell, high, low, avg, '
'vol, vol_cur, last, variables.'),
]
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(BitcoinTicker.defaults)
@property
def url(self):
return self.QUERY_URL % self.currency.lower()
def parse(self, body):
formatted = {}
if 'error' in body and body['error'] == "invalid pair":
locale.setlocale(locale.LC_MONETARY, "en_US.UTF-8")
self.currency = locale.localeconv()['int_curr_symbol'].strip()
body = self.fetch(self.url)
for k, v in body['ticker'].items():
formatted[k] = locale.currency(v)
return self.format.format(**formatted)
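# Illustrative sketch (not part of the original module): how this widget is
# typically instantiated from a qtile config; the currency and format values
# are hypothetical.
def _example_widget():
    """Sketch: build a ticker instance that shows only the last trade price."""
    return BitcoinTicker(currency="USD", format="BTC last: {last}")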
| kopchik/qtile | libqtile/widget/bitcoin_ticker.py | Python | mit | 2,607 | 0 |
# -*- coding: utf-8 -*-
import logging
import os
import uuid
import requests
from flask import request, current_app, g
from flask_babel import gettext
from flask_restful import Resource
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.elements import and_
from marshmallow.exceptions import ValidationError
from tahiti.app_auth import requires_auth
from tahiti.schema import *
from tahiti.workflow_api import filter_by_permissions
log = logging.getLogger(__name__)
class WorkflowPermissionApi(Resource):
""" REST API for sharing a Workflow """
@staticmethod
@requires_auth
def post(workflow_id, user_id):
result, result_code = dict(
status="ERROR",
message=gettext('Missing json in the request body')), 400
if request.json is not None:
form = request.json
to_validate = ['permission', 'user_name', 'user_login']
error = False
for check in to_validate:
if check not in form or form.get(check, '').strip() == '':
result, result_code = dict(
status="ERROR", message=gettext('Validation error'),
errors={'Missing': check}), 400
error = True
break
if check == 'permission' and form.get(
'permission') not in list(PermissionType.values()):
result, result_code = dict(
status="ERROR", message=gettext('Validation error'),
errors={'Invalid': check}), 400
error = True
break
if not error:
try:
filtered = filter_by_permissions(
Workflow.query, [PermissionType.WRITE])
workflow = filtered.filter(
Workflow.id == workflow_id).first()
if workflow is not None:
conditions = [WorkflowPermission.workflow_id ==
workflow_id,
WorkflowPermission.user_id == user_id]
permission = WorkflowPermission.query.filter(
*conditions).first()
action_performed = 'Added'
if permission is not None:
action_performed = 'Updated'
permission.permission = form['permission']
else:
permission = WorkflowPermission(
workflow=workflow, user_id=user_id,
user_name=form['user_name'],
user_login=form['user_login'],
permission=form['permission'])
db.session.add(permission)
db.session.commit()
result, result_code = {'message': action_performed,
'status': 'OK'}, 200
else:
result, result_code = dict(
status="ERROR",
message=gettext("%(type)s not found.",
                                            type=gettext('Workflow'))), 404
except Exception as e:
log.exception('Error in POST')
result, result_code = dict(status="ERROR",
message=gettext(
"Internal error")), 500
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
return result, result_code
@staticmethod
@requires_auth
def delete(workflow_id, user_id):
result, result_code = dict(status="ERROR",
message=gettext("%(type)s not found.",
type=gettext(
                                                                'Workflow'))), 404
filtered = filter_by_permissions(Workflow.query,
[PermissionType.WRITE])
workflow = filtered.filter(Workflow.id == workflow_id).first()
if workflow is not None:
permission = WorkflowPermission.query.filter(
WorkflowPermission.workflow_id == workflow_id,
WorkflowPermission.user_id == user_id).first()
if permission is not None:
try:
db.session.delete(permission)
db.session.commit()
result, result_code = dict(
status="OK",
message=gettext("%(what)s was successively deleted",
what=gettext('Workflow'))), 200
except Exception as e:
log.exception('Error in DELETE')
result, result_code = dict(status="ERROR",
message=gettext(
"Internal error")), 500
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
return result, result_code
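# Illustrative sketch (not part of the original module): how this resource
# might be registered with a Flask-RESTful Api object; the URL pattern is an
# assumption, not the project's actual routing.
def register_example(api):
    """Sketch: bind the resource to a nested workflow/user permission route."""
    api.add_resource(
        WorkflowPermissionApi,
        '/workflows/<int:workflow_id>/permissions/<int:user_id>')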
| eubr-bigsea/tahiti | tahiti/workflow_permission_api.py | Python | apache-2.0 | 5,484 | 0 |
"""Support for monitoring emoncms feeds."""
from __future__ import annotations
from datetime import timedelta
from http import HTTPStatus
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_ID,
CONF_SCAN_INTERVAL,
CONF_UNIT_OF_MEASUREMENT,
CONF_URL,
CONF_VALUE_TEMPLATE,
POWER_WATT,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_FEEDID = "FeedId"
ATTR_FEEDNAME = "FeedName"
ATTR_LASTUPDATETIME = "LastUpdated"
ATTR_LASTUPDATETIMESTR = "LastUpdatedStr"
ATTR_SIZE = "Size"
ATTR_TAG = "Tag"
ATTR_USERID = "UserId"
CONF_EXCLUDE_FEEDID = "exclude_feed_id"
CONF_ONLY_INCLUDE_FEEDID = "include_only_feed_id"
CONF_SENSOR_NAMES = "sensor_names"
DECIMALS = 2
DEFAULT_UNIT = POWER_WATT
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
ONLY_INCL_EXCL_NONE = "only_include_exclude_or_none"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_ID): cv.positive_int,
vol.Exclusive(CONF_ONLY_INCLUDE_FEEDID, ONLY_INCL_EXCL_NONE): vol.All(
cv.ensure_list, [cv.positive_int]
),
vol.Exclusive(CONF_EXCLUDE_FEEDID, ONLY_INCL_EXCL_NONE): vol.All(
cv.ensure_list, [cv.positive_int]
),
vol.Optional(CONF_SENSOR_NAMES): vol.All(
{cv.positive_int: vol.All(cv.string, vol.Length(min=1))}
),
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT): cv.string,
}
)
def get_id(sensorid, feedtag, feedname, feedid, feeduserid):
"""Return unique identifier for feed / sensor."""
return f"emoncms{sensorid}_{feedtag}_{feedname}_{feedid}_{feeduserid}"
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Emoncms sensor."""
apikey = config.get(CONF_API_KEY)
url = config.get(CONF_URL)
sensorid = config.get(CONF_ID)
value_template = config.get(CONF_VALUE_TEMPLATE)
config_unit = config.get(CONF_UNIT_OF_MEASUREMENT)
exclude_feeds = config.get(CONF_EXCLUDE_FEEDID)
include_only_feeds = config.get(CONF_ONLY_INCLUDE_FEEDID)
sensor_names = config.get(CONF_SENSOR_NAMES)
interval = config.get(CONF_SCAN_INTERVAL)
if value_template is not None:
value_template.hass = hass
data = EmonCmsData(hass, url, apikey, interval)
data.update()
if data.data is None:
return
sensors = []
for elem in data.data:
if exclude_feeds is not None and int(elem["id"]) in exclude_feeds:
continue
if include_only_feeds is not None and int(elem["id"]) not in include_only_feeds:
continue
name = None
if sensor_names is not None:
name = sensor_names.get(int(elem["id"]), None)
if unit := elem.get("unit"):
unit_of_measurement = unit
else:
unit_of_measurement = config_unit
sensors.append(
EmonCmsSensor(
hass,
data,
name,
value_template,
unit_of_measurement,
str(sensorid),
elem,
)
)
add_entities(sensors)
class EmonCmsSensor(SensorEntity):
"""Implementation of an Emoncms sensor."""
def __init__(
self, hass, data, name, value_template, unit_of_measurement, sensorid, elem
):
"""Initialize the sensor."""
if name is None:
# Suppress ID in sensor name if it's 1, since most people won't
# have more than one EmonCMS source and it's redundant to show the
# ID if there's only one.
id_for_name = "" if str(sensorid) == "1" else sensorid
# Use the feed name assigned in EmonCMS or fall back to the feed ID
feed_name = elem.get("name") or f"Feed {elem['id']}"
self._name = f"EmonCMS{id_for_name} {feed_name}"
else:
self._name = name
self._identifier = get_id(
sensorid, elem["tag"], elem["name"], elem["id"], elem["userid"]
)
self._hass = hass
self._data = data
self._value_template = value_template
self._unit_of_measurement = unit_of_measurement
self._sensorid = sensorid
self._elem = elem
if unit_of_measurement == "kWh":
self._attr_device_class = SensorDeviceClass.ENERGY
self._attr_state_class = SensorStateClass.TOTAL_INCREASING
elif unit_of_measurement == "W":
self._attr_device_class = SensorDeviceClass.POWER
self._attr_state_class = SensorStateClass.MEASUREMENT
if self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
elem["value"], STATE_UNKNOWN
)
else:
self._state = round(float(elem["value"]), DECIMALS)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def native_value(self):
"""Return the state of the device."""
return self._state
@property
def extra_state_attributes(self):
"""Return the attributes of the sensor."""
return {
ATTR_FEEDID: self._elem["id"],
ATTR_TAG: self._elem["tag"],
ATTR_FEEDNAME: self._elem["name"],
ATTR_SIZE: self._elem["size"],
ATTR_USERID: self._elem["userid"],
ATTR_LASTUPDATETIME: self._elem["time"],
ATTR_LASTUPDATETIMESTR: template.timestamp_local(float(self._elem["time"])),
}
def update(self):
"""Get the latest data and updates the state."""
self._data.update()
if self._data.data is None:
return
elem = next(
(
elem
for elem in self._data.data
if get_id(
self._sensorid,
elem["tag"],
elem["name"],
elem["id"],
elem["userid"],
)
== self._identifier
),
None,
)
if elem is None:
return
self._elem = elem
if self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
elem["value"], STATE_UNKNOWN
)
else:
self._state = round(float(elem["value"]), DECIMALS)
class EmonCmsData:
"""The class for handling the data retrieval."""
def __init__(self, hass, url, apikey, interval):
"""Initialize the data object."""
self._apikey = apikey
self._url = f"{url}/feed/list.json"
self._interval = interval
self._hass = hass
self.data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Emoncms."""
try:
parameters = {"apikey": self._apikey}
req = requests.get(
self._url, params=parameters, allow_redirects=True, timeout=5
)
except requests.exceptions.RequestException as exception:
_LOGGER.error(exception)
return
else:
if req.status_code == HTTPStatus.OK:
self.data = req.json()
else:
_LOGGER.error(
"Please verify if the specified configuration value "
"'%s' is correct! (HTTP Status_code = %d)",
CONF_URL,
req.status_code,
)
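# Illustrative sketch (not part of this integration): poll the emoncms feed
# list directly through EmonCmsData; the URL and API key are placeholders.
def _example_poll(hass):
    """Sketch: fetch the raw feed list once and return it (or None on error)."""
    data = EmonCmsData(hass, "http://localhost/emoncms", "API_KEY",
                       timedelta(seconds=30))
    data.update()
    return data.data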
| rohitranjan1991/home-assistant | homeassistant/components/emoncms/sensor.py | Python | mit | 8,487 | 0.000471 |
import eConsoleImpl
import eBaseImpl
import enigma
enigma.eTimer = eBaseImpl.eTimer
enigma.eSocketNotifier = eBaseImpl.eSocketNotifier
enigma.eConsoleAppContainer = eConsoleImpl.eConsoleAppContainer
from Tools.Profile import profile, profile_final
profile("PYTHON_START")
from enigma import runMainloop, eDVBDB, eTimer, quitMainloop, \
getDesktop, ePythonConfigQuery, eAVSwitch, eServiceEvent, \
eEPGCache
from tools import *
# Nemesis Patch
from enigma import nemTool
t = nemTool()
print "Restart EMU/CS"
t.sendCmd("/etc/init.d/restartEmu.sh &")
#End
profile("LANGUAGE")
from Components.Language import language
def setEPGLanguage():
print "language set to", language.getLanguage()
eServiceEvent.setEPGLanguage(language.getLanguage())
language.addCallback(setEPGLanguage)
from traceback import print_exc
profile("LOAD:InfoBar")
import Screens.InfoBar
from Screens.SimpleSummary import SimpleSummary
from sys import stdout, exc_info
profile("Bouquets")
eDVBDB.getInstance().reloadBouquets()
profile("ParentalControl")
from Components.ParentalControl import InitParentalControl
InitParentalControl()
profile("LOAD:Navigation")
from Navigation import Navigation
profile("LOAD:skin")
from skin import readSkin
profile("LOAD:Tools")
from Tools.Directories import InitFallbackFiles, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_SKIN
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, NoSave, ConfigSelection
InitFallbackFiles()
profile("config.misc")
config.misc.radiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "radio.mvi"))
config.misc.isNextRecordTimerAfterEventActionAuto = ConfigYesNo(default=False)
config.misc.useTransponderTime = ConfigYesNo(default=True)
config.misc.startCounter = ConfigInteger(default=0) # number of e2 starts...
config.misc.standbyCounter = NoSave(ConfigInteger(default=0)) # number of standby
config.misc.epgcache_filename = ConfigSelection(default = "/media/usb", choices = ["/media/usb", "/media/cf", "/media/hdd"])
def setEPGCachePath(configElement):
eEPGCache.getInstance().setCacheFile("%s/epg.dat" % configElement.value)
#demo code for use of standby enter leave callbacks
#def leaveStandby():
# print "!!!!!!!!!!!!!!!!!leave standby"
#def standbyCountChanged(configElement):
# print "!!!!!!!!!!!!!!!!!enter standby num", configElement.value
# from Screens.Standby import inStandby
# inStandby.onClose.append(leaveStandby)
#config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call = False)
####################################################
def useTransponderTimeChanged(configElement):
enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(configElement.value)
config.misc.useTransponderTime.addNotifier(useTransponderTimeChanged)
profile("Twisted")
try:
import twisted.python.runtime
twisted.python.runtime.platform.supportsThreads = lambda: False
import e2reactor
e2reactor.install()
from twisted.internet import reactor
def runReactor():
reactor.run(installSignalHandlers=False)
except ImportError:
print "twisted not available"
def runReactor():
runMainloop()
profile("LOAD:Plugin")
# initialize autorun plugins and plugin menu entries
from Components.PluginComponent import plugins
profile("LOAD:Wizard")
from Screens.Wizard import wizardManager
from Screens.DefaultWizard import *
from Screens.StartWizard import *
from Screens.TutorialWizard import *
import Screens.Rc
from Tools.BoundFunction import boundFunction
from Plugins.Plugin import PluginDescriptor
profile("misc")
had = dict()
def dump(dir, p = ""):
if isinstance(dir, dict):
for (entry, val) in dir.items():
dump(val, p + "(dict)/" + entry)
if hasattr(dir, "__dict__"):
for name, value in dir.__dict__.items():
if not had.has_key(str(value)):
had[str(value)] = 1
dump(value, p + "/" + str(name))
else:
print p + "/" + str(name) + ":" + str(dir.__class__) + "(cycle)"
else:
print p + ":" + str(dir)
# + ":" + str(dir.__class__)
# display
profile("LOAD:ScreenGlobals")
from Screens.Globals import Globals
from Screens.SessionGlobals import SessionGlobals
from Screens.Screen import Screen
profile("Screen")
Screen.global_screen = Globals()
# Session.open:
# * push current active dialog ('current_dialog') onto stack
# * call execEnd for this dialog
# * clear in_exec flag
# * hide screen
# * instantiate new dialog into 'current_dialog'
# * create screens, components
# * read, apply skin
# * create GUI for screen
# * call execBegin for new dialog
# * set in_exec
# * show gui screen
# * call components' / screen's onExecBegin
# ... screen is active, until it calls 'close'...
# Session.close:
# * assert in_exec
# * save return value
# * start deferred close handler ('onClose')
# * execEnd
# * clear in_exec
# * hide screen
# .. a moment later:
# Session.doClose:
# * destroy screen
class Session:
def __init__(self, desktop = None, summary_desktop = None, navigation = None):
self.desktop = desktop
self.summary_desktop = summary_desktop
self.nav = navigation
self.delay_timer = eTimer()
self.delay_timer.callback.append(self.processDelay)
self.current_dialog = None
self.dialog_stack = [ ]
self.summary_stack = [ ]
self.summary = None
self.in_exec = False
self.screen = SessionGlobals(self)
for p in plugins.getPlugins(PluginDescriptor.WHERE_SESSIONSTART):
p(reason=0, session=self)
def processDelay(self):
callback = self.current_dialog.callback
retval = self.current_dialog.returnValue
if self.current_dialog.isTmp:
self.current_dialog.doClose()
# dump(self.current_dialog)
del self.current_dialog
else:
del self.current_dialog.callback
self.popCurrent()
if callback is not None:
callback(*retval)
def execBegin(self, first=True, do_show = True):
assert not self.in_exec
self.in_exec = True
c = self.current_dialog
# when this is an execbegin after a execend of a "higher" dialog,
# popSummary already did the right thing.
if first:
self.pushSummary()
summary = c.createSummary() or SimpleSummary
self.summary = self.instantiateSummaryDialog(summary, c)
self.summary.show()
c.addSummary(self.summary)
c.saveKeyboardMode()
c.execBegin()
# when execBegin opened a new dialog, don't bother showing the old one.
if c == self.current_dialog and do_show:
c.show()
def execEnd(self, last=True):
assert self.in_exec
self.in_exec = False
self.current_dialog.execEnd()
self.current_dialog.restoreKeyboardMode()
self.current_dialog.hide()
if last:
self.current_dialog.removeSummary(self.summary)
self.popSummary()
def create(self, screen, arguments, **kwargs):
# creates an instance of 'screen' (which is a class)
try:
return screen(self, *arguments, **kwargs)
except:
errstr = "Screen %s(%s, %s): %s" % (str(screen), str(arguments), str(kwargs), exc_info()[0])
print errstr
print_exc(file=stdout)
quitMainloop(5)
def instantiateDialog(self, screen, *arguments, **kwargs):
return self.doInstantiateDialog(screen, arguments, kwargs, self.desktop)
def deleteDialog(self, screen):
screen.hide()
screen.doClose()
def instantiateSummaryDialog(self, screen, *arguments, **kwargs):
return self.doInstantiateDialog(screen, arguments, kwargs, self.summary_desktop)
def doInstantiateDialog(self, screen, arguments, kwargs, desktop):
# create dialog
try:
dlg = self.create(screen, arguments, **kwargs)
except:
print 'EXCEPTION IN DIALOG INIT CODE, ABORTING:'
print '-'*60
print_exc(file=stdout)
quitMainloop(5)
print '-'*60
if dlg is None:
return
# read skin data
readSkin(dlg, None, dlg.skinName, desktop)
# create GUI view of this dialog
assert desktop is not None
dlg.setDesktop(desktop)
dlg.applySkin()
return dlg
def pushCurrent(self):
if self.current_dialog is not None:
self.dialog_stack.append((self.current_dialog, self.current_dialog.shown))
self.execEnd(last=False)
def popCurrent(self):
if self.dialog_stack:
(self.current_dialog, do_show) = self.dialog_stack.pop()
self.execBegin(first=False, do_show=do_show)
else:
self.current_dialog = None
def execDialog(self, dialog):
self.pushCurrent()
self.current_dialog = dialog
self.current_dialog.isTmp = False
self.current_dialog.callback = None # would cause re-entrancy problems.
self.execBegin()
def openWithCallback(self, callback, screen, *arguments, **kwargs):
dlg = self.open(screen, *arguments, **kwargs)
dlg.callback = callback
return dlg
def open(self, screen, *arguments, **kwargs):
if self.dialog_stack and not self.in_exec:
raise RuntimeError("modal open are allowed only from a screen which is modal!")
# ...unless it's the very first screen.
self.pushCurrent()
dlg = self.current_dialog = self.instantiateDialog(screen, *arguments, **kwargs)
dlg.isTmp = True
dlg.callback = None
self.execBegin()
return dlg
def close(self, screen, *retval):
if not self.in_exec:
print "close after exec!"
return
# be sure that the close is for the right dialog!
# if it's not, you probably closed after another dialog
# was opened. this can happen if you open a dialog
# onExecBegin, and forget to do this only once.
# after close of the top dialog, the underlying will
# gain focus again (for a short time), thus triggering
# the onExec, which opens the dialog again, closing the loop.
assert screen == self.current_dialog
self.current_dialog.returnValue = retval
self.delay_timer.start(0, 1)
self.execEnd()
def pushSummary(self):
if self.summary is not None:
self.summary.hide()
self.summary_stack.append(self.summary)
self.summary = None
def popSummary(self):
if self.summary is not None:
self.summary.doClose()
self.summary = self.summary_stack.pop()
if self.summary is not None:
self.summary.show()
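# Illustrative sketch (not part of the original file): how the modal screen
# lifecycle implemented by Session above is typically driven; screen_cls and
# its constructor arguments are hypothetical.
def sessionUsageExample(session, screen_cls, *args):
	def exampleClosed(*result):
		print "example dialog closed with result", result
	return session.openWithCallback(exampleClosed, screen_cls, *args)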
profile("Standby,PowerKey")
import Screens.Standby
from Screens.Menu import MainMenu, mdom
from GlobalActions import globalActionMap
class PowerKey:
""" PowerKey stuff - handles the powerkey press and powerkey release actions"""
def __init__(self, session):
self.session = session
globalActionMap.actions["power_down"]=self.powerdown
globalActionMap.actions["power_up"]=self.powerup
globalActionMap.actions["power_long"]=self.powerlong
globalActionMap.actions["deepstandby"]=self.shutdown # frontpanel long power button press
self.standbyblocked = 1
def MenuClosed(self, *val):
self.session.infobar = None
def shutdown(self):
print "PowerOff - Now!"
if not Screens.Standby.inTryQuitMainloop and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND:
self.session.open(Screens.Standby.TryQuitMainloop, 1)
def powerlong(self):
if Screens.Standby.inTryQuitMainloop or (self.session.current_dialog and not self.session.current_dialog.ALLOW_SUSPEND):
return
self.doAction(action = config.usage.on_long_powerpress.value)
def doAction(self, action):
self.standbyblocked = 1
if action == "shutdown":
self.shutdown()
elif action == "show_menu":
print "Show shutdown Menu"
root = mdom.getroot()
for x in root.findall("menu"):
y = x.find("id")
if y is not None:
id = y.get("val")
if id and id == "shutdown":
self.session.infobar = self
menu_screen = self.session.openWithCallback(self.MenuClosed, MainMenu, x)
menu_screen.setTitle(_("Standby / Restart"))
return
elif action == "standby":
self.standby()
def powerdown(self):
self.standbyblocked = 0
def powerup(self):
if self.standbyblocked == 0:
self.doAction(action = config.usage.on_short_powerpress.value)
def standby(self):
if not Screens.Standby.inStandby and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND and self.session.in_exec:
self.session.open(Screens.Standby.Standby)
profile("Scart")
from Screens.Scart import Scart
class AutoScartControl:
def __init__(self, session):
self.force = False
self.current_vcr_sb = eAVSwitch.getInstance().getVCRSlowBlanking()
if self.current_vcr_sb and config.av.vcrswitch.value:
self.scartDialog = session.instantiateDialog(Scart, True)
else:
self.scartDialog = session.instantiateDialog(Scart, False)
config.av.vcrswitch.addNotifier(self.recheckVCRSb)
eAVSwitch.getInstance().vcr_sb_notifier.get().append(self.VCRSbChanged)
def recheckVCRSb(self, configElement):
self.VCRSbChanged(self.current_vcr_sb)
def VCRSbChanged(self, value):
#print "vcr sb changed to", value
self.current_vcr_sb = value
if config.av.vcrswitch.value or value > 2:
if value:
self.scartDialog.showMessageBox()
else:
self.scartDialog.switchToTV()
profile("Load:CI")
from enigma import eDVBCIInterfaces
from Screens.Ci import CiHandler
profile("Load:VolumeControl")
from Components.VolumeControl import VolumeControl
def runScreenTest():
config.misc.startCounter.value += 1
profile("readPluginList")
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
profile("Init:Session")
nav = Navigation(config.misc.isNextRecordTimerAfterEventActionAuto.value)
session = Session(desktop = getDesktop(0), summary_desktop = getDesktop(1), navigation = nav)
CiHandler.setSession(session)
screensToRun = [ p.__call__ for p in plugins.getPlugins(PluginDescriptor.WHERE_WIZARD) ]
profile("wizards")
screensToRun += wizardManager.getWizards()
screensToRun.append((100, Screens.InfoBar.InfoBar))
screensToRun.sort()
ePythonConfigQuery.setQueryFunc(configfile.getResolvedKey)
# eDVBCIInterfaces.getInstance().setDescrambleRules(0 # Slot Number
# ,( ["1:0:1:24:4:85:C00000:0:0:0:"], #service_list
# ["PREMIERE"], #provider_list,
# [] #caid_list
# ));
def runNextScreen(session, screensToRun, *result):
if result:
quitMainloop(*result)
return
screen = screensToRun[0][1]
args = screensToRun[0][2:]
if screensToRun:
session.openWithCallback(boundFunction(runNextScreen, session, screensToRun[1:]), screen, *args)
else:
session.open(screen, *args)
config.misc.epgcache_filename.addNotifier(setEPGCachePath)
runNextScreen(session, screensToRun)
profile("Init:VolumeControl")
vol = VolumeControl(session)
profile("Init:PowerKey")
power = PowerKey(session)
# we need session.scart to access it from within menu.xml
session.scart = AutoScartControl(session)
profile("RunReactor")
profile_final()
runReactor()
config.misc.startCounter.save()
profile("wakeup")
from time import time, strftime, localtime
from Tools.DreamboxHardware import setFPWakeuptime, getFPWakeuptime, setRTCtime
#get currentTime
nowTime = time()
wakeupList = [
x for x in ((session.nav.RecordTimer.getNextRecordingTime(), 0, session.nav.RecordTimer.isNextRecordAfterEventActionAuto()),
(session.nav.RecordTimer.getNextZapTime(), 1),
(plugins.getNextWakeupTime(), 2))
if x[0] != -1
]
wakeupList.sort()
recordTimerWakeupAuto = False
if wakeupList:
from time import strftime
startTime = wakeupList[0]
if (startTime[0] - nowTime) < 270: # no time to switch box back on
wptime = nowTime + 30 # so switch back on in 30 seconds
else:
wptime = startTime[0] - 240
if not config.misc.useTransponderTime.value:
print "dvb time sync disabled... so set RTC now to current linux time!", strftime("%Y/%m/%d %H:%M", localtime(nowTime))
setRTCtime(nowTime)
print "set wakeup time to", strftime("%Y/%m/%d %H:%M", localtime(wptime))
setFPWakeuptime(wptime)
recordTimerWakeupAuto = startTime[1] == 0 and startTime[2]
config.misc.isNextRecordTimerAfterEventActionAuto.value = recordTimerWakeupAuto
config.misc.isNextRecordTimerAfterEventActionAuto.save()
profile("stopService")
session.nav.stopService()
profile("nav shutdown")
session.nav.shutdown()
profile("configfile.save")
configfile.save()
return 0
profile("Init:skin")
import skin
skin.loadSkinData(getDesktop(0))
profile("InputDevice")
import Components.InputDevice
Components.InputDevice.InitInputDevices()
profile("AVSwitch")
import Components.AVSwitch
Components.AVSwitch.InitAVSwitch()
profile("RecordingConfig")
import Components.RecordingConfig
Components.RecordingConfig.InitRecordingConfig()
profile("UsageConfig")
import Components.UsageConfig
Components.UsageConfig.InitUsageConfig()
profile("keymapparser")
import keymapparser
keymapparser.readKeymap(config.usage.keymap.value)
profile("Network")
import Components.Network
Components.Network.InitNetwork()
profile("LCD")
import Components.Lcd
Components.Lcd.InitLcd()
profile("SetupDevices")
import Components.SetupDevices
Components.SetupDevices.InitSetupDevices()
profile("RFMod")
import Components.RFmod
Components.RFmod.InitRFmod()
profile("Init:CI")
import Screens.Ci
Screens.Ci.InitCiConfig()
#from enigma import dump_malloc_stats
#t = eTimer()
#t.callback.append(dump_malloc_stats)
#t.start(1000)
# first, setup a screen
try:
runScreenTest()
plugins.shutdown()
from Components.ParentalControl import parentalControl
parentalControl.save()
except:
print 'EXCEPTION IN PYTHON STARTUP CODE:'
print '-'*60
print_exc(file=stdout)
quitMainloop(5)
print '-'*60
| kingvuplus/nn-gui | mytest.py | Python | gpl-2.0 | 17,117 | 0.026757 |
from django.core.management.base import NoArgsCommand
from django_mailer import models
from django_mailer.management.commands import create_handler
from optparse import make_option
import logging
class Command(NoArgsCommand):
help = 'Place deferred messages back in the queue.'
option_list = NoArgsCommand.option_list + (
make_option('-m', '--max-retries', type='int',
help="Don't reset deferred messages with more than this many "
"retries."),
)
def handle_noargs(self, verbosity, max_retries=None, **options):
# Send logged messages to the console.
logger = logging.getLogger('django_mailer')
handler = create_handler(verbosity)
logger.addHandler(handler)
count = models.QueuedMessage.objects.retry_deferred(
max_retries=max_retries)
if count:
logger = logging.getLogger('django_mailer.commands.retry_deferred')
logger.warning("%s deferred message%s placed back in the queue" %
(count, count != 1 and 's' or ''))
logger.removeHandler(handler)
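# Illustrative sketch (not part of the original command): trigger the same
# logic programmatically; roughly equivalent to
# `manage.py retry_deferred --max-retries=3`.
def retry_deferred_example():
    from django.core.management import call_command
    call_command('retry_deferred', max_retries=3, verbosity=1)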
| GreenLightGo/django-mailer-2 | django_mailer/management/commands/retry_deferred.py | Python | mit | 1,161 | 0.001723 |
from __future__ import print_function, absolute_import
from .script_interface import ScriptInterfaceHelper, script_interface_register
@script_interface_register
class ComFixed(ScriptInterfaceHelper):
"""Fix the center of mass of specific types.
    Subtracts the mass-weighted fraction of the total
    force acting on all particles of the type from
    the particles after each force calculation. This
    keeps the center of mass of the type fixed iff
    the total momentum of the type is zero.
Parameters
----------
types : array_like
List of types of which the center of mass
should be fixed.
"""
_so_name = "ComFixed"
_so_creation_policy = "GLOBAL"
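# Minimal usage sketch (not part of the original module); assumes an existing
# espressomd.System instance whose type-0 particles should have their center
# of mass pinned, and that the system exposes this class as `comfixed`.
def _comfixed_example(system):
    """Sketch: fix the center of mass of all particles of type 0."""
    system.comfixed.types = [0]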
| KonradBreitsprecher/espresso | src/python/espressomd/comfixed.py | Python | gpl-3.0 | 699 | 0.001431 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/multi_arguments_results_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
from tensorflow.python.ops import array_ops
# Tests multiple inputs and outputs with index paths.
# CHECK-LABEL: func @key(
# CHECK-SAME: %[[ARG0:.*]]: tensor<3x5xf32> {tf_saved_model.index_path = ["y"]}
# CHECK-SAME: %[[ARG1:.*]]: tensor<5x3xf32> {tf_saved_model.index_path = ["x"]}
# CHECK-SAME: tensor<3x3xf32> {tf_saved_model.index_path = ["t"]}
# CHECK-SAME: tensor<5x5xf32> {tf_saved_model.index_path = ["s"]}
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK-DAG: %[[MUL0:.*]] = "tf.MatMul"(%[[ARG1]], %[[ARG0]])
# CHECK-DAG: %[[MUL1:.*]] = "tf.MatMul"(%[[ARG0]], %[[ARG1]])
# CHECK: %[[IDENTITY:.*]]:2 = "tf.IdentityN"(%[[MUL1]], %[[MUL0]])
# CHECK: return %[[IDENTITY]]#0, %[[IDENTITY]]#1
# CHECK-LABEL: func @key2(
# CHECK-SAME: %[[ARG1:.*]]: tensor<5x3xf32> {tf_saved_model.index_path = ["b"]}
# CHECK-SAME: %[[ARG0:.*]]: tensor<3x5xf32> {tf_saved_model.index_path = ["a"]}
# CHECK-SAME: tensor<5x5xf32> {tf_saved_model.index_path = ["d"]}
# CHECK-SAME: tensor<3x3xf32> {tf_saved_model.index_path = ["c"]}
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key2"]
# CHECK-DAG: %[[MUL1:.*]] = "tf.MatMul"(%[[ARG0]], %[[ARG1]])
# CHECK-DAG: %[[MUL2:.*]] = "tf.MatMul"(%[[ARG1]], %[[ARG0]])
# CHECK: %[[IDENTITY:.*]]:2 = "tf.IdentityN"(%[[MUL1]], %[[MUL2]])
# CHECK: return %[[IDENTITY]]#1, %[[IDENTITY]]#0
def Test():
x = tf.constant(1.0, shape=(5, 3))
y = tf.constant(1.0, shape=(3, 5))
s = tf.matmul(x, y)
t = tf.matmul(y, x)
[t, s] = array_ops.identity_n([t, s])
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)
tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)
tensor_info_t = tf.compat.v1.saved_model.utils.build_tensor_info(t)
return {
'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={
'x': tensor_info_x,
'y': tensor_info_y
},
outputs={
's': tensor_info_s,
't': tensor_info_t
},
method_name='some_function')),
'key2': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={
'a': tensor_info_y,
'b': tensor_info_x,
},
outputs={
'c': tensor_info_t,
'd': tensor_info_s,
},
method_name='reverse_arguments'))
}, None, None
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test)
| tensorflow/tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_arguments_results_v1.py | Python | apache-2.0 | 3,570 | 0.005882 |
from django.contrib import admin
from newspaper.news.models import News, Event
class NewsAdmin(admin.ModelAdmin):
list_display = ('title', 'publish_date')
list_filter = ('publish_date',)
search_fields = ('title',)
class EventAdmin(admin.ModelAdmin):
pass
admin.site.register(News, NewsAdmin)
admin.site.register(Event, EventAdmin) | openwebinars-django/newspaper | newspaper/newspaper/news/admin.py | Python | apache-2.0 | 353 | 0.002833 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
import six
from pants.build_graph.address import Address
from pants.engine.exp.addressable import AddressableDescriptor, TypeConstraintError
from pants.engine.exp.mapper import MappingError
from pants.engine.exp.objects import Resolvable, Serializable, SerializableFactory, Validatable
class ResolveError(Exception):
"""Indicates an error resolving an address to an object."""
class CycleError(ResolveError):
"""Indicates a cycle was detected during object resolution."""
class ResolvedTypeMismatchError(ResolveError):
"""Indicates a resolved object was not of the expected type."""
class Resolver(Resolvable):
"""Lazily resolves addressables using a graph."""
def __init__(self, graph, address):
self._graph = graph
self._address = address
def address(self):
return self._address.spec
def resolve(self):
return self._graph.resolve(self._address)
def __hash__(self):
return hash((self._graph, self._address))
def __eq__(self, other):
return (isinstance(other, Resolver) and
(self._graph, self._address) == (other._graph, other._address))
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'Graph.Resolver(graph={}, address={!r})'.format(self._graph, self._address)
class Graph(object):
"""A lazy, directed acyclic graph of objects. Not necessarily connected."""
def __init__(self, address_mapper, inline=False):
"""Creates a build graph composed of addresses resolvable by an address mapper.
:param address_mapper: An address mapper that can resolve the objects addresses point to.
:type address_mapper: :class:`pants.engine.exp.mapper.AddressMapper`.
:param bool inline: If `True`, resolved addressables are inlined in the containing object;
otherwise a resolvable pointer is used that dynamically traverses to the
addressable on every access.
"""
self._address_mapper = address_mapper
# TODO(John Sirois): This will need to be eliminated in favor of just using the AddressMapper
# caching or else also expose an invalidation interface based on address.spec_path - aka
# AddressMapper.namespace.
#
# Our resolution cache.
self._resolved_by_address = {}
self._inline = inline
def resolve(self, address):
"""Resolves the object pointed at by the given `address`.
The object will be hydrated from the BUILD graph along with any objects it points to.
The following lifecycle for resolved objects is observed:
1. The object's containing BUILD file family is parsed if not already parsed. This is a 'thin'
parse that just hydrates immediate fields of objects defined in the BUILD file family.
2. The object's addressed values are all first resolved completely if not already resolved.
3. The object is reconstructed using the fully resolved values from step 2.
4. If the reconstructed object is a :class:`pants.engine.exp.objects.SerializableFactory`, its
`create` method is called to allow for a replacement object to be supplied.
5. The reconstructed object from step 3 (or replacement object from step 4) is validated if
it's an instance of :class:`pants.engine.exp.objects.Validatable`.
6. The fully resolved and validated object is cached and returned.
:param address: The BUILD graph address to resolve.
:type address: :class:`pants.build_graph.address.Address`
:returns: The object pointed at by the given `address`.
:raises: :class:`ResolveError` if no object was found at the given `address`.
:raises: :class:`pants.engine.exp.objects.ValidationError` if the object was resolvable but
invalid.
"""
try:
return self._resolve_recursively(address)
except MappingError as e:
raise ResolveError('Failed to resolve {}: {}'.format(address, e))
def _resolve_recursively(self, address, resolve_path=None):
resolved = self._resolved_by_address.get(address)
if resolved:
return resolved
resolve_path = resolve_path or []
if address in resolve_path:
raise CycleError('Cycle detected along path:\n\t{}'
.format('\n\t'.join('* {}'.format(a) if a == address else str(a)
for a in resolve_path + [address])))
resolve_path.append(address)
obj = self._address_mapper.resolve(address)
def parse_addr(a):
return Address.parse(a, relative_to=address.spec_path)
def resolve_item(item, addr=None):
if Serializable.is_serializable(item):
hydrated_args = {'address': addr} if addr else {}
        # Recurse on the Serializable's values and hydrate any addressables found. This unwinds
        # from the leaves, thus hydrating the item's closure in the inline case.
for key, value in item._asdict().items():
is_addressable = AddressableDescriptor.is_addressable(item, key)
def maybe_addr(x):
return parse_addr(x) if is_addressable and isinstance(x, six.string_types) else x
if isinstance(value, collections.MutableMapping):
container_type = type(value)
container = container_type()
container.update((k, resolve_item(maybe_addr(v))) for k, v in value.items())
hydrated_args[key] = container
elif isinstance(value, collections.MutableSequence):
container_type = type(value)
hydrated_args[key] = container_type(resolve_item(maybe_addr(v)) for v in value)
else:
hydrated_args[key] = resolve_item(maybe_addr(value))
# Re-build the thin Serializable with either fully hydrated objects or Resolvables
# substituted for all Address values; ie: Only ever expose fully resolved or resolvable
# closures for requested addresses.
return self._hydrate(type(item), **hydrated_args)
elif isinstance(item, Address):
if self._inline:
return self._resolve_recursively(item, resolve_path)
else:
# TODO(John Sirois): Implement lazy cycle checks across Resolver chains.
return Resolver(self, address=item)
else:
return item
resolved = resolve_item(obj, addr=address)
resolve_path.pop(-1)
self._resolved_by_address[address] = resolved
return resolved
@staticmethod
def _hydrate(item_type, **kwargs):
try:
item = item_type(**kwargs)
except TypeConstraintError as e:
raise ResolvedTypeMismatchError(e)
# Let factories replace the hydrated object.
if isinstance(item, SerializableFactory):
item = item.create()
# Finally make sure objects that can self-validate get a chance to do so before we cache
# them as the pointee of `hydrated_item.address`.
if isinstance(item, Validatable):
item.validate()
return item
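# ---------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the resolve()/_resolve_recursively()
# machinery above is, at its core, memoised depth-first resolution with an explicit path used
# for cycle detection. The self-contained analogue below uses a plain dict in place of an
# AddressMapper purely to show that control flow; the names and data here are invented for the
# example and are not pants APIs.
if __name__ == '__main__':
  def resolve(mapping, address, path=None, cache=None):
    """Resolve `address` in `mapping`, memoising results and refusing cycles."""
    path = path if path is not None else []
    cache = cache if cache is not None else {}
    if address in cache:
      return cache[address]
    if address in path:
      raise Exception('Cycle detected along path: {}'.format(' -> '.join(path + [address])))
    path.append(address)
    # Resolve each referenced value; anything that is itself a key is resolved recursively.
    value = [resolve(mapping, v, path, cache) if v in mapping else v for v in mapping[address]]
    path.pop()
    cache[address] = value
    return value

  demo = {'a': ['b', 'c'], 'b': ['c'], 'c': [42]}
  print(resolve(demo, 'a'))  # [[[42]], [42]]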
| slyphon/pants | src/python/pants/engine/exp/graph.py | Python | apache-2.0 | 7,197 | 0.009032 |
__author__ = 'tbri'
from openerp import models, fields, api, _
class add_sponsorship_wizard(models.TransientModel):
_name = 'add_sponsorship_wizard'
def _get_all_children(self):
c = []
children = self.env['res.partner'].search([('sponsored_child', '=', 'True')])
for n in children:
child_ref = '%s %s' % (n.child_ident, n.name)
c.append( (n.id, child_ref) )
return c
#sponsor_id = fields.Many2one('sponsor')
# see partner.py...........
## child_id = fields.Many2one('sponsored_child', domain=[('active','=',True)])
child_id = fields.Selection( _get_all_children , string=_('Child'))
sub_sponsor = fields.Many2one('res.partner', _('Sub Sponsor'), domain=[('sub_sponsor','=',True)])
start_date = fields.Date(_('Start date'))
end_date = fields.Date(_('End date'))
@api.one
def data_save(self):
print "DATA_SAVE 1", self._context
"""
        DATA_SAVE! {'lang': 'en_US', 'search_disable_custom_filters': True, 'tz': False, 'uid': 1, 'active_model': 'sponsor', 'active_ids': [1], 'active_id': 1}
"""
model = self._context['active_model']
active_id = self._context['active_id']
assert model == 'res.partner'
sponsor = self.env['res.partner'].browse(active_id)
assert sponsor.sponsor
print "DATA_SAVE 2", sponsor
print "DATA_SAVE 3", self.child_id
sponsorship = {'sponsor_id' : active_id,
'sponsored_child' : int(self.child_id),
'start_date' : self.start_date,
'end_date' : self.end_date,
'sub_sponsor' : self.sub_sponsor}
print "CREATING SPONSORSHP"
self.env['sponsorship'].create( sponsorship)
return {'type': 'ir.actions.act_window_close'}
| bringsvor/sponsor | wizards/add_sponsorship.py | Python | agpl-3.0 | 1,922 | 0.011967 |
# -*- coding: utf-8 -*-
"""
analytics.models
Models for Demand and Supply data
:copyright: (c) 2013 by Openlabs Technologies & Consulting (P) Limited
:license: see LICENSE for more details.
"""
import operator
from django.db import models
import django.contrib.admin
from admin.models import Occupation, Institution, Company, SubSector
__all__ = ['DEGREE_CHOICES', 'REGION_CHOICES', 'State', 'City', 'SupplyBase',
'DemandData', 'CompanyYearData', 'DiversityRatioLevel',
'DiversityRatioSubsector', 'GenderDiversity', 'ITSpend',
'RevenueSubsector', 'RevenueOccupation', 'RevenueTotal',
'TalentSaturation']
DEGREE_CHOICES = (
('UG', 'Undergraduate Degree'),
('PG', 'Postgraduate Degree'),
('DOC', 'Ph.D/M.Phil'),
('PSD', 'Post School Diploma'),
('PGD', 'Post Graduate Diploma'),
('UNK', 'Unknown'),
)
REGION_CHOICES = (
('NORTH', 'North'),
('SOUTH', 'South'),
('EAST', 'East'),
('WEST', 'West'),
('CENTRAL', 'Central'),
)
class State(models.Model):
"""
States
"""
name = models.CharField(max_length=50, default=None, unique=True)
region = models.CharField(max_length=12, choices=REGION_CHOICES)
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('name', 'region',)
def __unicode__(self):
"""
Returns object display name
"""
return self.name
class City(models.Model):
"""
Cities
"""
name = models.CharField(max_length=50, default=None)
state = models.ForeignKey('State')
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('name', 'state',)
verbose_name_plural = 'Cities'
def __unicode__(self):
"""
Returns object display name
"""
return "%s,%s" % (self.name, self.state)
class SupplyBase(models.Model):
"""
Demand supply data
"""
year = models.IntegerField()
city = models.ForeignKey('City')
occupation = models.ForeignKey(Occupation)
institution = models.ForeignKey(Institution)
degree = models.CharField(max_length=3, choices=DEGREE_CHOICES,
default=None)
supply = models.IntegerField()
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'city', 'occupation', 'institution',
'degree',)
verbose_name_plural = 'SupplyBase'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s,%s" % (self.year, self.city, self.occupation,)
class DemandData(models.Model):
"""
Demand data
"""
year = models.IntegerField()
city = models.ForeignKey('City')
occupation = models.ForeignKey(Occupation)
company = models.ForeignKey(Company)
demand = models.IntegerField()
headcount = models.IntegerField()
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'city', 'occupation', 'company',)
verbose_name_plural = 'DemandBase'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s,%s" % (self.year, self.city, self.occupation,)
class CompanyYearData(models.Model):
"""
Revenue, Headcount data for companies annually
"""
year = models.IntegerField()
company = models.ForeignKey(Company)
revenue = models.IntegerField()
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'company', )
verbose_name_plural = 'Company Annual Data'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s" % (self.year, self.company, )
class DiversityRatioLevel(models.Model):
"""
Diversity ratio for levels
"""
year = models.IntegerField(unique=True)
male_leadership = models.IntegerField(
verbose_name='Percent Male in Leadership roles'
)
male_entry = models.IntegerField(
verbose_name='Percent Male in Entry Level roles'
)
male_middle = models.IntegerField(
verbose_name='Percent Male in Middle Level roles'
)
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
@property
def female_leadership(self):
"Percent Females in leadership level roles"
return 100 - self.male_leadership
@property
def female_entry(self):
"Percent Females in entry level roles"
return 100 - self.male_entry
@property
def female_middle(self):
"Percent Females in middle level roles"
return 100 - self.male_middle
class Meta:
verbose_name_plural = 'Diversity Ratio for Experience Levels'
def __unicode__(self):
"""
Returns object display name
"""
return "%d" % (self.year, )
class DiversityRatioSubsector(models.Model):
"""
Diversity ratio for subsector
"""
year = models.IntegerField()
subsector = models.ForeignKey(SubSector, verbose_name='Sub-sector')
male = models.IntegerField(verbose_name='Percent males in subsector')
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
@property
def female(self):
"Percent Females in subsector"
return 100 - self.male
class Meta:
unique_together = ('year', 'subsector', )
verbose_name_plural = 'Diversity Ratio for Subsector'
def __unicode__(self):
"""
Returns object display name
"""
return "%d, %s" % (self.year, self.subsector, )
class GenderDiversity(models.Model):
"""
Gender diversity as per course
"""
year = models.IntegerField()
category = models.CharField(max_length=60)
male = models.IntegerField()
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'category', )
verbose_name_plural = 'Gender Diversity'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s" % (self.year, self.category, )
class ITSpend(models.Model):
"""
IT Spend data
"""
year = models.IntegerField()
sub_sector = models.ForeignKey(SubSector, verbose_name='Sub-sector')
world_spend = models.IntegerField(verbose_name='World IT Spend')
india_revenue = models.IntegerField(verbose_name='Indian IT Revenue')
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'sub_sector', )
verbose_name_plural = 'IT Spend'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s" % (self.year, self.sub_sector, )
class RevenueSubsector(models.Model):
"""
Revenue per subsector
"""
year = models.IntegerField()
sub_sector = models.ForeignKey(SubSector)
revenue = models.IntegerField()
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'sub_sector', )
verbose_name_plural = 'Revenue by Subsector'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s" % (self.year, self.sub_sector, )
class RevenueOccupation(models.Model):
"""
Revenue by occupation
"""
year = models.IntegerField()
occupation = models.ForeignKey(Occupation)
revenue = models.IntegerField()
cagr_next_7_years = models.IntegerField(
verbose_name='CAGR % for next 7 years'
)
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('year', 'occupation', )
verbose_name_plural = 'Revenue by occupation'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%s" % (self.year, self.occupation, )
@property
def revenue_after_7year(self):
return int(self.revenue * (1 + self.cagr_next_7_years / 100.0) ** 7)
class RevenueTotal(models.Model):
"""
Total revenue
"""
year = models.IntegerField(unique=True)
revenue = models.IntegerField()
most_likely_growth = models.IntegerField(
verbose_name='Most likely growth percent',
blank=True,
null=True
)
optimistic_growth = models.IntegerField(
        verbose_name='Optimistic growth percent',
blank=True,
null=True
)
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = 'Total Revenues'
def __unicode__(self):
"""
Returns object display name
"""
return "%d,%d" % (self.year, self.revenue, )
@property
def growth_series(self):
"""
Return growth and most likely series
"""
resultset = RevenueTotal.objects.filter(year__lte=self.year)
optimistic_series = []
most_likely_series = []
years = []
for result in resultset:
most_likely_series.append(result.revenue)
optimistic_series.append(result.revenue)
years.append(result.year)
for i in range(7):
optimistic_series.append(
int(optimistic_series[-1] *
(1 + self.optimistic_growth / 100.0))
)
most_likely_series.append(
int(most_likely_series[-1] *
(1 + self.most_likely_growth / 100.0))
)
years.append(years[-1] + 1)
return {
'years': years,
'optimistic_series': optimistic_series,
'most_likely_series': most_likely_series,
}
class TalentSaturation(models.Model):
"""
Model for talent saturation
    We keep headcount here because the sum from the other models does not equal
    the figure in the worksheet, perhaps due to lack of data from all companies.
"""
year = models.IntegerField(unique=True)
headcount = models.IntegerField()
attrition_pc = models.DecimalField(
max_digits=5,
decimal_places=2,
verbose_name="Annual Attrition (%)",
default=5.0,
)
cagr_pc = models.DecimalField(
max_digits=5,
decimal_places=2,
verbose_name="CAGR (%)",
default=8.6
)
fresher_hiring_pc = models.DecimalField(
max_digits=5,
decimal_places=2,
verbose_name="Fresher Hiring (%)",
default=95.0
)
need_for_experience_pc = models.DecimalField(
max_digits=5,
decimal_places=2,
verbose_name="Need for > 2 years experienced (% of headcount)",
default=45.0
)
create_date = models.DateTimeField(auto_now_add=True)
write_date = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = 'Talent Saturation'
def __unicode__(self):
"""
Returns object display name
"""
return "%d" % (self.year, )
@property
def quitters(self):
return int(self.headcount * self.attrition_pc / 100)
def series(self):
"Return talent saturation series"
years = []
records = TalentSaturation.objects.filter(year__lte=self.year) \
.order_by('year')
headcounts = [record.headcount for record in records]
years = [record.year for record in records] + \
range(self.year + 1, self.year + 8)
for i in range(7):
headcounts.append(int(headcounts[-1] * (1 + self.cagr_pc / 100)))
# difference between headcounts
hirings = map(
operator.sub, headcounts, [headcounts[0]] + headcounts[:-1],
)
quitters = [record.quitters for record in records]
for i in range(7):
quitters.append(int(quitters[-1] * (1 + self.cagr_pc / 100)))
gross_hiring = map(operator.add, quitters, hirings)
fresher_pcs = [record.fresher_hiring_pc for record in records] + \
[self.fresher_hiring_pc] * 7
fresher_hiring = map(
lambda g, f: int(g * f / 100),
gross_hiring, fresher_pcs
)
experience_need = map(
lambda record: int(
record.headcount * record.need_for_experience_pc / 100
),
records
)
experience_need += map(
lambda x: int(x * self.need_for_experience_pc / 100),
headcounts[-7:]
)
demand = map(
operator.sub,
experience_need, [experience_need[0]] + experience_need[:-1],
)
potential_supply = map(
lambda x: int(x * (self.fresher_hiring_pc / 100) ** 2),
[0, 0] + fresher_hiring[:-2]
)
return {
'years': years[3:],
'demand': demand[3:],
'potential_supply': potential_supply[3:],
}
django.contrib.admin.site.register(State)
django.contrib.admin.site.register(City)
django.contrib.admin.site.register(SupplyBase)
django.contrib.admin.site.register(DemandData)
django.contrib.admin.site.register(CompanyYearData)
django.contrib.admin.site.register(DiversityRatioLevel)
django.contrib.admin.site.register(DiversityRatioSubsector)
django.contrib.admin.site.register(GenderDiversity)
django.contrib.admin.site.register(ITSpend)
django.contrib.admin.site.register(RevenueSubsector)
django.contrib.admin.site.register(RevenueOccupation)
django.contrib.admin.site.register(RevenueTotal)
django.contrib.admin.site.register(TalentSaturation)
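# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the projections in
# RevenueOccupation.revenue_after_7year and RevenueTotal.growth_series are plain
# compound growth. The standalone example below reproduces that arithmetic
# without Django or a database; the revenue figure and growth rates are made up
# for the example.
if __name__ == '__main__':
    def project(start, growth_pc, years=7):
        """Compound `start` forward by `growth_pc` percent per year."""
        series = [start]
        for _ in range(years):
            series.append(int(series[-1] * (1 + growth_pc / 100.0)))
        return series

    revenue = 108  # made-up starting revenue
    print(project(revenue, growth_pc=13))       # "most likely" style series
    print(project(revenue, growth_pc=15))       # "optimistic" style series
    print(project(50, growth_pc=9)[-1])         # analogous to revenue_after_7year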
| arpitprogressive/arpittest | apps/analytics/models.py | Python | bsd-3-clause | 14,238 | 0.000702 |
#!/usr/bin/env python
import os, sys, webbrowser
try: from subprocess import getstatusoutput # Python3
except: from commands import getstatusoutput # Python2
def shell_command(command): # do the command and print the output
cmdResults = getstatusoutput(command)
if True: # not cmdResults[0]:
for theLine in cmdResults[1].splitlines():
print(theLine.partition('==')[0])
if __name__ == '__main__':
port = os.getenv('VCAP_APP_PORT', None)
if port: # running on Bluemix
shell_command('python manage.py runserver --noreload 0.0.0.0:' + port)
else: # running locally
webbrowser.open('http://127.0.0.1:8000')
shell_command('python3 manage.py runserver')
| cclauss/In-Harms-Way | server/run_django.py | Python | apache-2.0 | 727 | 0.016506 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
from flask import Flask, request
from google.appengine.api import users
from models import User
from tasks import create_dataset, create_click_log_data
app = Flask(__name__)
@app.route('/_admin/createbq', methods=['GET'])
def create_bq():
result = create_dataset()
return result, 200
@app.route('/_admin/createtestdata', methods=['GET'])
def create_test_data():
team_id = request.cookies.get('team', False)
user_key_name = "{}_{}".format(team_id, users.get_current_user().user_id())
user_entity = User.get_by_id(user_key_name)
result = create_click_log_data(user_entity.team)
return result, 200
# [END app]
| yosukesuzuki/url-shortner | admin.py | Python | bsd-2-clause | 1,232 | 0 |
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.lib.kernel import connect_qtconsole
from IPython.kernel.zmq.kernelapp import IPKernelApp
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def mpl_kernel(gui):
"""Launch and return an IPython kernel with matplotlib support for the desired gui
"""
kernel = IPKernelApp.instance()
kernel.initialize(['python', '--matplotlib=%s' % gui,
#'--log-level=10'
])
return kernel
class InternalIPKernel(object):
def init_ipkernel(self, backend):
# Start IPython kernel with GUI event loop and mpl support
self.ipkernel = mpl_kernel(backend)
# To create and track active qt consoles
self.consoles = []
# This application will also act on the shell user namespace
self.namespace = self.ipkernel.shell.user_ns
# Example: a variable that will be seen by the user in the shell, and
# that the GUI modifies (the 'Counter++' button increments it):
self.namespace['app_counter'] = 0
#self.namespace['ipkernel'] = self.ipkernel # dbg
def print_namespace(self, evt=None):
print("\n***Variables in User namespace***")
for k, v in self.namespace.items():
if not k.startswith('_'):
print('%s -> %r' % (k, v))
sys.stdout.flush()
def new_qt_console(self, evt=None):
"""start a new qtconsole connected to our kernel"""
return connect_qtconsole(self.ipkernel.connection_file, profile=self.ipkernel.profile)
def count(self, evt=None):
self.namespace['app_counter'] += 1
def cleanup_consoles(self, evt=None):
for c in self.consoles:
c.kill()
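# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): roughly how a GUI
# application is expected to drive InternalIPKernel. The exact event-loop
# integration depends on the toolkit and IPython version, so treat this as an
# outline rather than a tested recipe.
if __name__ == '__main__':
    app = InternalIPKernel()
    app.init_ipkernel('qt')                     # kernel with Qt event-loop support
    app.consoles.append(app.new_qt_console())   # attach a qtconsole to it
    app.namespace['greeting'] = 'hello from the host app'
    try:
        app.ipkernel.start()                    # blocks, servicing GUI and kernel
    finally:
        app.cleanup_consoles()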
| pioneers/topgear | ipython-in-depth/examples/Embedding/internal_ipkernel.py | Python | apache-2.0 | 2,018 | 0.004955 |
"""Interactive (widget based) artists."""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, RadioButtons
from types import SimpleNamespace
from ._helpers import create_figure, plot_colorbar, add_sideplot
from ._base import _order_for_imshow
from ._colors import colormaps
from ..exceptions import DimensionalityError
from .. import kit as wt_kit
from .. import data as wt_data
__all__ = ["interact2D"]
class Focus:
def __init__(self, axes, linewidth=2):
self.axes = axes
self.linewidth = linewidth
ax = axes[0]
for side in ["top", "bottom", "left", "right"]:
ax.spines[side].set_linewidth(self.linewidth)
self.focus_axis = ax
def __call__(self, ax):
if type(ax) == str:
ind = self.axes.index(self.focus_axis)
if ax == "next":
ind -= 1
elif ax == "previous":
ind += 1
ax = self.axes[ind % len(self.axes)]
if self.focus_axis == ax or ax not in self.axes:
return
else: # set new focus
for spine in ["top", "bottom", "left", "right"]:
self.focus_axis.spines[spine].set_linewidth(1)
ax.spines[spine].set_linewidth(self.linewidth)
self.focus_axis = ax
def _at_dict(data, sliders, xaxis, yaxis):
return {
a.natural_name: (a[:].flat[int(sliders[a.natural_name].val)], a.units)
for a in data.axes
if a not in [xaxis, yaxis]
}
def get_axes(data, axes):
xaxis, yaxis = axes
if type(xaxis) in [int, str]:
xaxis = wt_kit.get_index(data.axis_names, xaxis)
xaxis = data.axes[xaxis]
elif type(xaxis) != wt_data.Axis:
raise TypeError("invalid xaxis type {0}".format(type(xaxis)))
if type(yaxis) in [int, str]:
yaxis = wt_kit.get_index(data.axis_names, yaxis)
yaxis = data.axes[yaxis]
elif type(yaxis) != wt_data.Axis:
raise TypeError("invalid xaxis type {0}".format(type(yaxis)))
return xaxis, yaxis
def get_channel(data, channel):
if isinstance(channel, int):
channel = data.channels[channel]
elif isinstance(channel, str):
channel = [ch for ch in data.channels if ch.natural_name == channel][0]
elif type(channel) != wt_data.Channel:
raise TypeError("invalid channel type {0}".format(type(channel)))
return channel
def get_colormap(channel):
if channel.signed:
cmap = "signed"
else:
cmap = "default"
cmap = colormaps[cmap]
cmap.set_bad([0.75] * 3, 1.0)
cmap.set_under([0.75] * 3, 1.0)
return cmap
def get_clim(channel, current_state):
if current_state.local:
arr = current_state.dat[channel.natural_name][:]
if channel.signed:
mag = np.nanmax(np.abs(arr))
clim = [-mag, mag]
else:
clim = [0, np.nanmax(arr)]
else:
if channel.signed:
clim = [-channel.mag(), channel.mag()]
else:
clim = [0, channel.max()]
return clim
def gen_ticklabels(points, signed=None):
step = np.nanmin(np.diff(points))
if step == 0: # zeros everywhere
ticklabels = ["" for i in range(11)]
if signed:
ticklabels[5] = "0"
else:
ticklabels[0] = "0"
return ticklabels
ordinal = np.log10(np.abs(step))
ndigits = -int(np.floor(ordinal))
if ndigits < 0:
ndigits += 1
fmt = "{0:0.0f}"
else:
fmt = "{" + "0:.{0}f".format(ndigits) + "}"
ticklabels = [fmt.format(round(point, ndigits)) for point in points]
return ticklabels
def norm(arr, signed, ignore_zero=True):
if signed:
norm = np.nanmax(np.abs(arr))
else:
norm = np.nanmax(arr)
if norm != 0 and ignore_zero:
arr /= norm
return arr
def interact2D(
data: wt_data.Data, xaxis=0, yaxis=1, channel=0, local=False, use_imshow=False, verbose=True
):
"""Interactive 2D plot of the dataset.
Side plots show x and y projections of the slice (shaded gray).
Left clicks on the main axes draw 1D slices on side plots at the coordinates selected.
Right clicks remove the 1D slices.
For 3+ dimensional data, sliders below the main axes are used to change which slice is viewed.
Parameters
----------
data : WrightTools.Data object
Data to plot.
xaxis : string, integer, or data.Axis object (optional)
Expression or index of x axis. Default is 0.
yaxis : string, integer, or data.Axis object (optional)
Expression or index of y axis. Default is 1.
channel : string, integer, or data.Channel object (optional)
Name or index of channel to plot. Default is 0.
local : boolean (optional)
Toggle plotting locally. Default is False.
use_imshow : boolean (optional)
If true, matplotlib imshow is used to render the 2D slice.
Can give better performance, but is only accurate for
uniform grids. Default is False.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
# avoid changing passed data object
data = data.copy()
# unpack
data.prune(keep_channels=channel)
channel = get_channel(data, channel)
xaxis, yaxis = get_axes(data, [xaxis, yaxis])
cmap = get_colormap(channel)
current_state = SimpleNamespace()
# create figure
nsliders = data.ndim - 2
if nsliders < 0:
raise DimensionalityError(">= 2", data.ndim)
# TODO: implement aspect; doesn't work currently because of our incorporation of colorbar
fig, gs = create_figure(width="single", nrows=7 + nsliders, cols=[1, 1, 1, 1, 1, "cbar"])
# create axes
ax0 = plt.subplot(gs[1:6, 0:5])
ax0.patch.set_facecolor("w")
cax = plt.subplot(gs[1:6, -1])
sp_x = add_sideplot(ax0, "x", pad=0.1)
sp_y = add_sideplot(ax0, "y", pad=0.1)
ax_local = plt.subplot(gs[0, 0], aspect="equal", frameon=False)
ax_title = plt.subplot(gs[0, 3], frameon=False)
ax_title.text(
0.5,
0.5,
data.natural_name,
fontsize=18,
horizontalalignment="center",
verticalalignment="center",
transform=ax_title.transAxes,
)
ax_title.set_axis_off()
# NOTE: there are more axes here for more buttons / widgets in future plans
# create lines
x_color = "#00BFBF" # cyan with increased saturation
y_color = "coral"
line_sp_x = sp_x.plot([None], [None], visible=False, color=x_color, linewidth=2)[0]
line_sp_y = sp_y.plot([None], [None], visible=False, color=y_color, linewidth=2)[0]
crosshair_hline = ax0.plot([None], [None], visible=False, color=x_color, linewidth=2)[0]
crosshair_vline = ax0.plot([None], [None], visible=False, color=y_color, linewidth=2)[0]
current_state.xarg = xaxis.points.flatten().size // 2
current_state.yarg = yaxis.points.flatten().size // 2
xdir = 1 if xaxis.points.flatten()[-1] - xaxis.points.flatten()[0] > 0 else -1
ydir = 1 if yaxis.points.flatten()[-1] - yaxis.points.flatten()[0] > 0 else -1
current_state.bin_vs_x = True
current_state.bin_vs_y = True
# create buttons
current_state.local = local
radio = RadioButtons(ax_local, (" global", " local"))
if local:
radio.set_active(1)
else:
radio.set_active(0)
for circle in radio.circles:
circle.set_radius(0.14)
# create sliders
sliders = {}
for axis in data.axes:
if axis not in [xaxis, yaxis]:
if axis.size > np.prod(axis.shape):
raise NotImplementedError("Cannot use multivariable axis as a slider")
slider_axes = plt.subplot(gs[~len(sliders), :]).axes
slider = Slider(slider_axes, axis.label, 0, axis.points.size - 1, valinit=0, valstep=1)
sliders[axis.natural_name] = slider
slider.ax.vlines(
range(axis.points.size - 1),
*slider.ax.get_ylim(),
colors="k",
linestyle=":",
alpha=0.5
)
slider.valtext.set_text(gen_ticklabels(axis.points)[0])
current_state.focus = Focus([ax0] + [slider.ax for slider in sliders.values()])
# initial xyz start are from zero indices of additional axes
current_state.dat = data.chop(
xaxis.natural_name,
yaxis.natural_name,
at=_at_dict(data, sliders, xaxis, yaxis),
verbose=False,
)[0]
clim = get_clim(channel, current_state)
ticklabels = gen_ticklabels(np.linspace(*clim, 11), channel.signed)
if clim[0] == clim[1]:
clim = [-1 if channel.signed else 0, 1]
gen_mesh = ax0.pcolormesh if not use_imshow else ax0.imshow
obj2D = gen_mesh(
current_state.dat,
cmap=cmap,
vmin=clim[0],
vmax=clim[1],
ylabel=yaxis.label,
xlabel=xaxis.label,
)
ax0.grid(b=True)
# colorbar
colorbar = plot_colorbar(
cax, cmap=cmap, label=channel.natural_name, ticks=np.linspace(clim[0], clim[1], 11)
)
colorbar.set_ticklabels(ticklabels)
fig.canvas.draw_idle()
def draw_sideplot_projections():
arr = current_state.dat[channel.natural_name][:]
xind = list(
np.array(
current_state.dat.axes[
current_state.dat.axis_expressions.index(xaxis.expression)
].shape
)
> 1
).index(True)
yind = list(
np.array(
current_state.dat.axes[
current_state.dat.axis_expressions.index(yaxis.expression)
].shape
)
> 1
).index(True)
if channel.signed:
temp_arr = np.ma.masked_array(arr, np.isnan(arr), copy=True)
temp_arr[temp_arr < 0] = 0
x_proj_pos = np.nanmean(temp_arr, axis=yind)
y_proj_pos = np.nanmean(temp_arr, axis=xind)
temp_arr = np.ma.masked_array(arr, np.isnan(arr), copy=True)
temp_arr[temp_arr > 0] = 0
x_proj_neg = np.nanmean(temp_arr, axis=yind)
y_proj_neg = np.nanmean(temp_arr, axis=xind)
x_proj = np.nanmean(arr, axis=yind)
y_proj = np.nanmean(arr, axis=xind)
alpha = 0.4
blue = "#517799" # start with #87C7FF and change saturation
red = "#994C4C" # start with #FF7F7F and change saturation
if current_state.bin_vs_x:
x_proj_norm = max(np.nanmax(x_proj_pos), np.nanmax(-x_proj_neg))
if x_proj_norm != 0:
x_proj_pos /= x_proj_norm
x_proj_neg /= x_proj_norm
x_proj /= x_proj_norm
try:
sp_x.fill_between(xaxis.points, x_proj_pos, 0, color=red, alpha=alpha)
sp_x.fill_between(xaxis.points, 0, x_proj_neg, color=blue, alpha=alpha)
sp_x.fill_between(xaxis.points, x_proj, 0, color="k", alpha=0.3)
except ValueError: # Input passed into argument is not 1-dimensional
current_state.bin_vs_x = False
sp_x.set_visible(False)
if current_state.bin_vs_y:
y_proj_norm = max(np.nanmax(y_proj_pos), np.nanmax(-y_proj_neg))
if y_proj_norm != 0:
y_proj_pos /= y_proj_norm
y_proj_neg /= y_proj_norm
y_proj /= y_proj_norm
try:
sp_y.fill_betweenx(yaxis.points, y_proj_pos, 0, color=red, alpha=alpha)
sp_y.fill_betweenx(yaxis.points, 0, y_proj_neg, color=blue, alpha=alpha)
sp_y.fill_betweenx(yaxis.points, y_proj, 0, color="k", alpha=0.3)
except ValueError:
current_state.bin_vs_y = False
sp_y.set_visible(False)
else:
if current_state.bin_vs_x:
x_proj = np.nanmean(arr, axis=yind)
x_proj = norm(x_proj, channel.signed)
try:
sp_x.fill_between(xaxis.points, x_proj, 0, color="k", alpha=0.3)
except ValueError:
current_state.bin_vs_x = False
sp_x.set_visible(False)
if current_state.bin_vs_y:
y_proj = np.nanmean(arr, axis=xind)
y_proj = norm(y_proj, channel.signed)
try:
sp_y.fill_betweenx(yaxis.points, y_proj, 0, color="k", alpha=0.3)
except ValueError:
current_state.bin_vs_y = False
sp_y.set_visible(False)
draw_sideplot_projections()
ax0.set_xlim(xaxis.points.min(), xaxis.points.max())
ax0.set_ylim(yaxis.points.min(), yaxis.points.max())
if channel.signed:
sp_x.set_ylim(-1.1, 1.1)
sp_y.set_xlim(-1.1, 1.1)
def update_sideplot_slices():
# TODO: if bins is only available along one axis, slicing should be valid along the other
# e.g., if bin_vs_y = True, then assemble slices vs x
# for now, just uniformly turn off slicing
if (not current_state.bin_vs_x) or (not current_state.bin_vs_y):
return
xlim = ax0.get_xlim()
ylim = ax0.get_ylim()
x0 = xaxis.points[current_state.xarg]
y0 = yaxis.points[current_state.yarg]
crosshair_hline.set_data(np.array([xlim, [y0, y0]]))
crosshair_vline.set_data(np.array([[x0, x0], ylim]))
at_dict = _at_dict(data, sliders, xaxis, yaxis)
at_dict[xaxis.natural_name] = (x0, xaxis.units)
side_plot_data = data.chop(yaxis.natural_name, at=at_dict, verbose=False)
side_plot = side_plot_data[0][channel.natural_name].points
side_plot = norm(side_plot, channel.signed)
line_sp_y.set_data(side_plot, yaxis.points)
side_plot_data.close()
at_dict = _at_dict(data, sliders, xaxis, yaxis)
at_dict[yaxis.natural_name] = (y0, yaxis.units)
side_plot_data = data.chop(xaxis.natural_name, at=at_dict, verbose=False)
side_plot = side_plot_data[0][channel.natural_name].points
side_plot = norm(side_plot, channel.signed)
line_sp_x.set_data(xaxis.points, side_plot)
side_plot_data.close()
def update_local(index):
if verbose:
print("normalization:", index)
current_state.local = radio.value_selected[1:] == "local"
clim = get_clim(channel, current_state)
ticklabels = gen_ticklabels(np.linspace(*clim, 11), channel.signed)
colorbar.set_ticklabels(ticklabels)
if clim[0] == clim[1]:
clim = [-1 if channel.signed else 0, 1]
obj2D.set_clim(*clim)
fig.canvas.draw_idle()
def update_slider(info, use_imshow=use_imshow):
current_state.dat.close()
current_state.dat = data.chop(
xaxis.natural_name,
yaxis.natural_name,
at={
a.natural_name: (a[:].flat[int(sliders[a.natural_name].val)], a.units)
for a in data.axes
if a not in [xaxis, yaxis]
},
verbose=False,
)[0]
for k, s in sliders.items():
s.valtext.set_text(
gen_ticklabels(data.axes[data.axis_names.index(k)].points)[int(s.val)]
)
if use_imshow:
transpose = _order_for_imshow(
                current_state.dat[xaxis.natural_name][:],
                current_state.dat[yaxis.natural_name][:],
)
obj2D.set_data(current_state.dat[channel.natural_name][:].transpose(transpose))
else:
obj2D.set_array(current_state.dat[channel.natural_name][:].ravel())
clim = get_clim(channel, current_state)
ticklabels = gen_ticklabels(np.linspace(*clim, 11), channel.signed)
if clim[0] == clim[1]:
clim = [-1 if channel.signed else 0, 1]
obj2D.set_clim(*clim)
colorbar.set_ticklabels(ticklabels)
sp_x.collections.clear()
sp_y.collections.clear()
draw_sideplot_projections()
if line_sp_x.get_visible() and line_sp_y.get_visible():
update_sideplot_slices()
fig.canvas.draw_idle()
def update_crosshairs(xarg, yarg, hide=False):
# if x0 is None or y0 is None:
# raise TypeError((x0, y0))
# find closest x and y pts in dataset
current_state.xarg = xarg
current_state.yarg = yarg
xedge = xarg in [0, xaxis.points.flatten().size - 1]
yedge = yarg in [0, yaxis.points.flatten().size - 1]
current_state.xpos = xaxis.points[xarg]
current_state.ypos = yaxis.points[yarg]
if not hide: # update crosshairs and show
if verbose:
print(current_state.xpos, current_state.ypos)
update_sideplot_slices()
line_sp_x.set_visible(True)
line_sp_y.set_visible(True)
crosshair_hline.set_visible(True)
crosshair_vline.set_visible(True)
# thicker lines if on the axis edges
crosshair_vline.set_linewidth(6 if xedge else 2)
crosshair_hline.set_linewidth(6 if yedge else 2)
else: # do not update and hide crosshairs
line_sp_x.set_visible(False)
line_sp_y.set_visible(False)
crosshair_hline.set_visible(False)
crosshair_vline.set_visible(False)
def update_button_release(info):
# mouse button release
current_state.focus(info.inaxes)
if info.inaxes == ax0:
xlim = ax0.get_xlim()
ylim = ax0.get_ylim()
x0, y0 = info.xdata, info.ydata
if x0 > xlim[0] and x0 < xlim[1] and y0 > ylim[0] and y0 < ylim[1]:
xarg = np.abs(xaxis.points - x0).argmin()
yarg = np.abs(yaxis.points - y0).argmin()
if info.button == 1 or info.button is None: # left click
update_crosshairs(xarg, yarg)
elif info.button == 3: # right click
update_crosshairs(xarg, yarg, hide=True)
fig.canvas.draw_idle()
def update_key_press(info):
if info.key in ["left", "right", "up", "down"]:
if current_state.focus.focus_axis != ax0: # sliders
if info.key in ["up", "down"]:
return
slider = [
slider
for slider in sliders.values()
if slider.ax == current_state.focus.focus_axis
][0]
new_val = slider.val + 1 if info.key == "right" else slider.val - 1
new_val %= slider.valmax + 1
slider.set_val(new_val)
else: # crosshairs
dx = dy = 0
if info.key == "left":
dx -= 1
elif info.key == "right":
dx += 1
elif info.key == "up":
dy += 1
elif info.key == "down":
dy -= 1
update_crosshairs(
(current_state.xarg + dx * xdir) % xaxis.points.flatten().size,
(current_state.yarg + dy * ydir) % yaxis.points.flatten().size,
)
elif info.key == "tab":
current_state.focus("next")
elif info.key == "ctrl+tab":
current_state.focus("previous")
else:
mpl.backend_bases.key_press_handler(info, fig.canvas, fig.canvas.toolbar)
fig.canvas.draw_idle()
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
fig.canvas.mpl_connect("button_release_event", update_button_release)
fig.canvas.mpl_connect("key_press_event", update_key_press)
radio.on_clicked(update_local)
for slider in sliders.values():
slider.on_changed(update_slider)
return obj2D, sliders, crosshair_hline, crosshair_vline, radio, colorbar
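# --------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical interactive use. The file
# name is made up; any WrightTools Data object with at least two dimensions works. Keeping a
# reference to the returned artists is what keeps the widget callbacks alive.
if __name__ == "__main__":
    import WrightTools as wt

    data = wt.open("example.wt5")  # hypothetical data file
    artists = interact2D(data, xaxis=0, yaxis=1, channel=0, local=False)
    plt.show()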
| wright-group/WrightTools | WrightTools/artists/_interact.py | Python | mit | 20,114 | 0.00179 |
# -*- coding: utf-8 -*-
from core.test_utils.context_managers import SettingsOverride
from django import template
from django.test.testcases import TestCase
class PackaginatorTagsTests(TestCase):
def test_fixed_ga(self):
tpl = template.Template("""
{% load packaginator_tags %}
{% fixed_ga %}
""")
context = template.Context()
with SettingsOverride(URCHIN_ID='testid', DEBUG=False):
output = tpl.render(context)
self.assertTrue('var pageTracker = _gat._getTracker("testid");' in output)
with SettingsOverride(URCHIN_ID='testid', DEBUG=True):
output = tpl.render(context)
self.assertEqual(output.strip(), "")
with SettingsOverride(URCHIN_ID=None, DEBUG=True):
output = tpl.render(context)
self.assertEqual(output.strip(), "")
with SettingsOverride(URCHIN_ID=None, DEBUG=False):
output = tpl.render(context)
self.assertEqual(output.strip(), "") | pythonchelle/opencomparison | apps/core/tests/test_ga.py | Python | mit | 1,060 | 0.00566 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from erpnext.controllers.selling_controller import SellingController
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_delivery_note_serial_no
from frappe import _
from frappe.contacts.doctype.address.address import get_company_address
from frappe.desk.notifications import clear_doctype_notifications
from frappe.model.mapper import get_mapped_doc
from frappe.model.utils import get_fetch_values
from frappe.utils import cint, flt
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class DeliveryNote(SellingController):
def __init__(self, *args, **kwargs):
super(DeliveryNote, self).__init__(*args, **kwargs)
self.status_updater = [{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_delivered',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_order',
'status_field': 'delivery_status',
'keyword': 'Delivered',
'second_source_dt': 'Sales Invoice Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
'second_source_extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Invoice Item',
'join_field': 'si_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Invoice',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_invoice',
'overflow_type': 'delivery',
'no_allowance': 1
}]
if cint(self.is_return):
self.status_updater.append({
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Sales Order',
'source_field': '-1 * qty',
'second_source_dt': 'Sales Invoice Item',
'second_source_field': '-1 * qty',
'second_join_field': 'so_detail',
'extra_cond': """ and exists (select name from `tabDelivery Note`
where name=`tabDelivery Note Item`.parent and is_return=1)""",
'second_source_extra_cond': """ and exists (select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and is_return=1 and update_stock=1)"""
})
def before_print(self):
def toggle_print_hide(meta, fieldname):
df = meta.get_field(fieldname)
if self.get("print_without_amount"):
df.set("__print_hide", 1)
else:
df.delete_key("__print_hide")
item_meta = frappe.get_meta("Delivery Note Item")
print_hide_fields = {
"parent": ["grand_total", "rounded_total", "in_words", "currency", "total", "taxes"],
"items": ["rate", "amount", "discount_amount", "price_list_rate", "discount_percentage"]
}
for key, fieldname in print_hide_fields.items():
for f in fieldname:
toggle_print_hide(self.meta if key == "parent" else item_meta, f)
super(DeliveryNote, self).before_print()
def set_actual_qty(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
actual_qty = frappe.db.sql("""select actual_qty from `tabBin`
where item_code = %s and warehouse = %s""", (d.item_code, d.warehouse))
d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0
def so_required(self):
"""check in manage account if sales order required or not"""
if frappe.db.get_value("Selling Settings", None, 'so_required') == 'Yes':
for d in self.get('items'):
if not d.against_sales_order:
frappe.throw(_("Sales Order required for Item {0}").format(d.item_code))
def validate(self):
self.validate_posting_time()
super(DeliveryNote, self).validate()
self.set_status()
self.so_required()
self.validate_proj_cust()
self.check_sales_order_on_hold_or_close("against_sales_order")
self.validate_for_items()
self.validate_warehouse()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.validate_with_previous_doc()
if self._action != 'submit' and not self.is_return:
set_batch_nos(self, 'warehouse', True)
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.update_current_stock()
if not self.installation_status: self.installation_status = 'Not Installed'
def validate_with_previous_doc(self):
super(DeliveryNote, self).validate_with_previous_doc({
"Sales Order": {
"ref_dn_field": "against_sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Order Item": {
"ref_dn_field": "so_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Sales Invoice": {
"ref_dn_field": "against_sales_invoice",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Invoice Item": {
"ref_dn_field": "si_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) \
and not self.is_return:
self.validate_rate_with_reference_doc([["Sales Order", "against_sales_order", "so_detail"],
["Sales Invoice", "against_sales_invoice", "si_detail"]])
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or
ifnull(customer,'')='')""", (self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_for_items(self):
for d in self.get('items'):
#Customer Provided parts will have zero valuation rate
if frappe.db.get_value('Item', d.item_code, 'is_customer_provided_item'):
d.allow_zero_valuation_rate = 1
def validate_warehouse(self):
super(DeliveryNote, self).validate_warehouse()
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == 1:
if not d['warehouse']:
frappe.throw(_("Warehouse required for stock Item {0}").format(d["item_code"]))
def update_current_stock(self):
if self.get("_action") and self._action != "update_after_submit":
for d in self.get('items'):
d.actual_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, "actual_qty")
for d in self.get('packed_items'):
bin_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, ["actual_qty", "projected_qty"], as_dict=True)
if bin_qty:
d.actual_qty = flt(bin_qty.actual_qty)
d.projected_qty = flt(bin_qty.projected_qty)
def on_submit(self):
self.validate_packed_qty()
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
# update delivered qty in sales order
self.update_prevdoc_status()
self.update_billing_status()
if not self.is_return:
self.check_credit_limit()
elif self.issue_credit_note:
self.make_return_invoice()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
self.update_stock_ledger()
self.make_gl_entries()
self.update_blanket_order()
def on_cancel(self):
super(DeliveryNote, self).on_cancel()
self.check_sales_order_on_hold_or_close("against_sales_order")
self.check_next_docstatus()
self.update_prevdoc_status()
self.update_billing_status()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
self.update_stock_ledger()
self.cancel_packing_slips()
self.make_gl_entries_on_cancel()
self.update_blanket_order()
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
extra_amount = 0
validate_against_credit_limit = False
bypass_credit_limit_check_at_sales_order = cint(frappe.db.get_value("Customer Credit Limit",
filters={'parent': self.customer, 'parenttype': 'Customer', 'company': self.company},
fieldname="bypass_credit_limit_check"))
if bypass_credit_limit_check_at_sales_order:
validate_against_credit_limit = True
extra_amount = self.base_grand_total
else:
for d in self.get("items"):
if not (d.against_sales_order or d.against_sales_invoice):
validate_against_credit_limit = True
break
if validate_against_credit_limit:
check_credit_limit(self.customer, self.company,
bypass_credit_limit_check_at_sales_order, extra_amount)
def validate_packed_qty(self):
"""
Validate that if packed qty exists, it should be equal to qty
"""
if not any([flt(d.get('packed_qty')) for d in self.get("items")]):
return
has_error = False
for d in self.get("items"):
if flt(d.get('qty')) != flt(d.get('packed_qty')):
frappe.msgprint(_("Packed quantity must equal quantity for Item {0} in row {1}").format(d.item_code, d.idx))
has_error = True
if has_error:
raise frappe.ValidationError
def check_next_docstatus(self):
submit_rv = frappe.db.sql("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.delivery_note = %s and t1.docstatus = 1""",
(self.name))
if submit_rv:
frappe.throw(_("Sales Invoice {0} has already been submitted").format(submit_rv[0][0]))
submit_in = frappe.db.sql("""select t1.name
from `tabInstallation Note` t1, `tabInstallation Note Item` t2
where t1.name = t2.parent and t2.prevdoc_docname = %s and t1.docstatus = 1""",
(self.name))
if submit_in:
frappe.throw(_("Installation Note {0} has already been submitted").format(submit_in[0][0]))
def cancel_packing_slips(self):
"""
Cancel submitted packing slips related to this delivery note
"""
res = frappe.db.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %s
AND docstatus = 1""", self.name)
if res:
for r in res:
ps = frappe.get_doc('Packing Slip', r[0])
ps.cancel()
frappe.msgprint(_("Packing Slip(s) cancelled"))
def update_status(self, status):
self.set_status(update=True, status=status)
self.notify_update()
clear_doctype_notifications(self)
def update_billing_status(self, update_modified=True):
updated_delivery_notes = [self.name]
for d in self.get("items"):
if d.si_detail and not d.so_detail:
d.db_set('billed_amt', d.amount, update_modified=update_modified)
elif d.so_detail:
updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified)
for dn in set(updated_delivery_notes):
dn_doc = self if (dn == self.name) else frappe.get_doc("Delivery Note", dn)
dn_doc.update_billing_percentage(update_modified=update_modified)
self.load_from_db()
def make_return_invoice(self):
try:
return_invoice = make_sales_invoice(self.name)
return_invoice.is_return = True
return_invoice.save()
return_invoice.submit()
credit_note_link = frappe.utils.get_link_to_form('Sales Invoice', return_invoice.name)
frappe.msgprint(_("Credit Note {0} has been created automatically").format(credit_note_link))
except:
frappe.throw(_("Could not create Credit Note automatically, please uncheck 'Issue Credit Note' and submit again"))
def update_billed_amount_based_on_so(so_detail, update_modified=True):
# Billed against Sales Order directly
billed_against_so = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where so_detail=%s and (dn_detail is null or dn_detail = '') and docstatus=1""", so_detail)
billed_against_so = billed_against_so and billed_against_so[0][0] or 0
# Get all Delivery Note Item rows against the Sales Order Item row
dn_details = frappe.db.sql("""select dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent
from `tabDelivery Note Item` dn_item, `tabDelivery Note` dn
where dn.name=dn_item.parent and dn_item.so_detail=%s
and dn.docstatus=1 and dn.is_return = 0
order by dn.posting_date asc, dn.posting_time asc, dn.name asc""", so_detail, as_dict=1)
updated_dn = []
for dnd in dn_details:
billed_amt_agianst_dn = 0
# If delivered against Sales Invoice
if dnd.si_detail:
billed_amt_agianst_dn = flt(dnd.amount)
billed_against_so -= billed_amt_agianst_dn
else:
# Get billed amount directly against Delivery Note
billed_amt_agianst_dn = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where dn_detail=%s and docstatus=1""", dnd.name)
billed_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0
# Distribute billed amount directly against SO between DNs based on FIFO
if billed_against_so and billed_amt_agianst_dn < dnd.amount:
pending_to_bill = flt(dnd.amount) - billed_amt_agianst_dn
if pending_to_bill <= billed_against_so:
billed_amt_agianst_dn += pending_to_bill
billed_against_so -= pending_to_bill
else:
billed_amt_agianst_dn += billed_against_so
billed_against_so = 0
frappe.db.set_value("Delivery Note Item", dnd.name, "billed_amt", billed_amt_agianst_dn, update_modified=update_modified)
updated_dn.append(dnd.parent)
return updated_dn
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Shipments'),
})
return list_context
def get_invoiced_qty_map(delivery_note):
"""returns a map: {dn_detail: invoiced_qty}"""
invoiced_qty_map = {}
for dn_detail, qty in frappe.db.sql("""select dn_detail, qty from `tabSales Invoice Item`
where delivery_note=%s and docstatus=1""", delivery_note):
if not invoiced_qty_map.get(dn_detail):
invoiced_qty_map[dn_detail] = 0
invoiced_qty_map[dn_detail] += qty
return invoiced_qty_map
def get_returned_qty_map(delivery_note):
"""returns a map: {so_detail: returned_qty}"""
returned_qty_map = frappe._dict(frappe.db.sql("""select dn_item.item_code, sum(abs(dn_item.qty)) as qty
from `tabDelivery Note Item` dn_item, `tabDelivery Note` dn
where dn.name = dn_item.parent
and dn.docstatus = 1
and dn.is_return = 1
and dn.return_against = %s
group by dn_item.item_code
""", delivery_note))
return returned_qty_map
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
doc = frappe.get_doc('Delivery Note', source_name)
to_make_invoice_qty_map = {}
returned_qty_map = get_returned_qty_map(source_name)
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("set_po_nos")
if len(target.get("items")) == 0:
frappe.throw(_("All these items have already been invoiced"))
target.run_method("calculate_taxes_and_totals")
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Sales Invoice", 'company_address', target.company_address))
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = to_make_invoice_qty_map[source_doc.name]
if source_doc.serial_no and source_parent.per_billed > 0:
target_doc.serial_no = get_delivery_note_serial_no(source_doc.item_code,
target_doc.qty, source_parent.name)
def get_pending_qty(item_row):
pending_qty = item_row.qty - invoiced_qty_map.get(item_row.name, 0)
returned_qty = 0
if returned_qty_map.get(item_row.item_code, 0) > 0:
returned_qty = flt(returned_qty_map.get(item_row.item_code, 0))
returned_qty_map[item_row.item_code] -= pending_qty
if returned_qty:
if returned_qty >= pending_qty:
pending_qty = 0
returned_qty -= pending_qty
else:
pending_qty -= returned_qty
returned_qty = 0
to_make_invoice_qty_map[item_row.name] = pending_qty
return pending_qty
doc = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Sales Invoice",
"field_map": {
"is_return": "is_return"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "dn_detail",
"parent": "delivery_note",
"so_detail": "so_detail",
"against_sales_order": "sales_order",
"serial_no": "serial_no",
"cost_center": "cost_center"
},
"postprocess": update_item,
"filter": lambda d: get_pending_qty(d) <= 0 if not doc.get("is_return") else get_pending_qty(d) > 0
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doc
@frappe.whitelist()
def make_delivery_trip(source_name, target_doc=None):
def update_stop_details(source_doc, target_doc, source_parent):
target_doc.customer = source_parent.customer
target_doc.address = source_parent.shipping_address_name
target_doc.customer_address = source_parent.shipping_address
target_doc.contact = source_parent.contact_person
target_doc.customer_contact = source_parent.contact_display
target_doc.grand_total = source_parent.grand_total
# Append unique Delivery Notes in Delivery Trip
delivery_notes.append(target_doc.delivery_note)
delivery_notes = []
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Delivery Trip",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Delivery Stop",
"field_map": {
"parent": "delivery_note"
},
"condition": lambda item: item.parent not in delivery_notes,
"postprocess": update_stop_details
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_installation_note(source_name, target_doc=None):
def update_item(obj, target, source_parent):
target.qty = flt(obj.qty) - flt(obj.installed_qty)
target.serial_no = obj.serial_no
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Installation Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Installation Note Item",
"field_map": {
"name": "prevdoc_detail_docname",
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
},
"postprocess": update_item,
"condition": lambda doc: doc.installed_qty < doc.qty
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_packing_slip(source_name, target_doc=None):
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Packing Slip",
"field_map": {
"name": "delivery_note",
"letter_head": "letter_head"
},
"validation": {
"docstatus": ["=", 0]
}
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Delivery Note", source_name, target_doc)
@frappe.whitelist()
def update_delivery_note_status(docname, status):
dn = frappe.get_doc("Delivery Note", docname)
dn.update_status(status)
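# ---------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the core of
# update_billed_amount_based_on_so is a FIFO allocation of an amount billed directly against a
# Sales Order across its Delivery Note rows. The plain-Python reduction below ignores amounts
# already billed against individual Delivery Notes and uses made-up figures, purely to show
# the allocation order.
if __name__ == "__main__":
	def distribute_billed_amount(billed_against_so, dn_amounts):
		"""Spread the SO-billed amount over DN rows, oldest first, capping each row."""
		billed_per_dn = []
		for amount in dn_amounts:
			billed = min(amount, billed_against_so)
			billed_against_so -= billed
			billed_per_dn.append(billed)
		return billed_per_dn

	# 250 billed straight against the Sales Order, delivered via three DN rows of 100 each
	print(distribute_billed_amount(250, [100, 100, 100]))  # [100, 100, 50]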
| libracore/erpnext | erpnext/stock/doctype/delivery_note/delivery_note.py | Python | gpl-3.0 | 23,654 | 0.003551 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class UtmCampaign(models.Model):
_inherit = 'utm.campaign'
ab_testing_winner_selection = fields.Selection(selection_add=[('crm_lead_count', 'Leads')])
| jeremiahyan/odoo | addons/mass_mailing_crm/models/utm.py | Python | gpl-3.0 | 294 | 0.003401 |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from nicepy.utils import ljust_all, pretty_repr
def get_failed_msg(compare_method, values, expected_values, names=None, expected_names=None):
failed_list = []
names = names or map(str, range(len(values)))
expected_names = expected_names or [''] * len(names)
for value, expected_value, name, expected_name in zip(values, expected_values,
names, expected_names):
#print value, expected_value, name, expected_name
if not compare_method(expected_value, value):
failed_list.append((pretty_repr(value), pretty_repr(expected_value),
name, expected_name))
return _get_failed_msg(failed_list)
def _get_failed_msg(failed_list):
if not failed_list:
return None
msg = 'actual values != expected values:'
failed_list = zip(*map(ljust_all, zip(*failed_list)))
for value_repr, expected_value_repr, name, expected_name in sorted(failed_list):
msg += '\n\t%s' % name
if expected_name:
msg += ' != %s' % expected_name
msg += ': %s != %s' % (value_repr, expected_value_repr)
return msg
def get_multi_failed_msg(assert_method, *lists):
failed_msgs = OrderedDict()
for index, args in enumerate(zip(*lists)):
try:
assert_method(*args)
except AssertionError as e:
failed_msgs[index] = e.message
msg = None
if failed_msgs:
msg = 'Multi-assert failed:'
for index, error_msg in sorted(failed_msgs.iteritems()):
msg += '\nIndex %d: %s' % (index, error_msg)
return msg
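# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical use of
# get_failed_msg inside a test helper. The values are made up, and the exact
# message text depends on pretty_repr/ljust_all from nicepy.utils.
if __name__ == '__main__':
    msg = get_failed_msg(lambda expected, actual: expected == actual,
                         values=[3, 'foo'],
                         expected_values=[4, 'foo'],
                         names=['count', 'name'])
    print(msg)  # mentions only 'count', the one value that differs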
| katakumpo/nicepy | nicepy/assertions/helpers.py | Python | mit | 1,696 | 0.004717 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-12 23:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twitter_feed', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='links',
field=models.CharField(default=' ', max_length=200),
preserve_default=False,
),
]
| isstiaung/Adimal | adimal/twitter_feed/migrations/0002_tweet_links.py | Python | mit | 490 | 0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import metrics
from tensorflow.python.keras.utils import metrics_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(
name='my_precision', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
self.assertEqual(p_obj.top_k, 15)
self.assertEqual(p_obj.class_id, 12)
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
self.assertEqual(p_obj2.top_k, 15)
self.assertEqual(p_obj2.class_id, 12)
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
def test_unweighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1. / 3, self.evaluate(result))
def test_weighted_top_k(self):
p_obj = metrics.Precision(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(p_obj.variables))
self.evaluate(
p_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
expected_precision = tp / predicted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_unweighted_class_id(self):
p_obj = metrics.Precision(class_id=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_class_id(self):
p_obj = metrics.Precision(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
def test_unweighted_top_k_and_threshold(self):
p_obj = metrics.Precision(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(p_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(
name='my_recall', thresholds=[0.4, 0.9], top_k=15, class_id=12)
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
self.assertEqual(r_obj.top_k, 15)
self.assertEqual(r_obj.class_id, 12)
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
self.assertEqual(r_obj2.top_k, 15)
self.assertEqual(r_obj2.class_id, 12)
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
def test_unweighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_weighted_top_k(self):
r_obj = metrics.Recall(top_k=3)
y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
self.evaluate(variables.variables_initializer(r_obj.variables))
self.evaluate(
r_obj(
y_true1,
y_pred1,
sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))
y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
result = r_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))
tp = (2 + 5) + (3 + 3)
positives = (4 + 2 + 5) + (3 + 3 + 3 + 3)
expected_recall = tp / positives
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_unweighted_class_id(self):
r_obj = metrics.Recall(class_id=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([0.2, 0.1, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 0, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_class_id(self):
r_obj = metrics.Recall(class_id=2, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(0, self.evaluate(r_obj.false_negatives))
y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(1, self.evaluate(r_obj.false_negatives))
def test_unweighted_top_k_and_threshold(self):
r_obj = metrics.Recall(thresholds=.7, top_k=2)
self.evaluate(variables.variables_initializer(r_obj.variables))
y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
y_true = constant_op.constant([1, 1, 1, 0, 1], shape=(1, 5))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.25, self.evaluate(result))
self.assertAlmostEqual(1, self.evaluate(r_obj.true_positives))
self.assertAlmostEqual(3, self.evaluate(r_obj.false_negatives))
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class AUCTest(test.TestCase):
def setup(self):
self.num_thresholds = 3
self.y_pred = constant_op.constant([0, 0.5, 0.3, 0.9], dtype=dtypes.float32)
self.y_true = constant_op.constant([0, 0, 1, 1])
self.sample_weight = [1, 2, 3, 4]
# threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
# y_pred when threshold = 0 - 1e-7 : [1, 1, 1, 1]
# y_pred when threshold = 0.5 : [0, 0, 0, 1]
# y_pred when threshold = 1 + 1e-7 : [0, 0, 0, 0]
# without sample_weight:
# tp = np.sum([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 0, 0]], axis=1)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# with sample_weight:
# tp = np.sum([[0, 0, 3, 4], [0, 0, 0, 4], [0, 0, 0, 0]], axis=1)
# fp = np.sum([[1, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], axis=1)
# fn = np.sum([[0, 0, 0, 0], [0, 0, 3, 0], [0, 0, 3, 4]], axis=1)
# tn = np.sum([[0, 0, 0, 0], [1, 2, 0, 0], [1, 2, 0, 0]], axis=1)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
def test_config(self):
auc_obj = metrics.AUC(
num_thresholds=100,
curve='PR',
summation_method='majoring',
name='auc_1')
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 100)
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 100)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_config_manual_thresholds(self):
auc_obj = metrics.AUC(
num_thresholds=None,
curve='PR',
summation_method='majoring',
name='auc_1',
thresholds=[0.3, 0.5])
self.assertEqual(auc_obj.name, 'auc_1')
self.assertEqual(len(auc_obj.variables), 4)
self.assertEqual(auc_obj.num_thresholds, 4)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.3, 0.5, 1.0])
self.assertEqual(auc_obj.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
old_config = auc_obj.get_config()
self.assertDictEqual(old_config, json.loads(json.dumps(old_config)))
# Check save and restore config.
auc_obj2 = metrics.AUC.from_config(auc_obj.get_config())
self.assertEqual(auc_obj2.name, 'auc_1')
self.assertEqual(len(auc_obj2.variables), 4)
self.assertEqual(auc_obj2.num_thresholds, 4)
self.assertEqual(auc_obj2.curve, metrics_utils.AUCCurve.PR)
self.assertEqual(auc_obj2.summation_method,
metrics_utils.AUCSummationMethod.MAJORING)
new_config = auc_obj2.get_config()
self.assertDictEqual(old_config, new_config)
self.assertAllClose(auc_obj.thresholds, auc_obj2.thresholds)
def test_value_is_idempotent(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=3)
self.evaluate(variables.variables_initializer(auc_obj.variables))
# Run several updates.
update_op = auc_obj.update_state(self.y_true, self.y_pred)
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_auc = self.evaluate(auc_obj.result())
for _ in range(10):
self.assertAllClose(initial_auc, self.evaluate(auc_obj.result()), 1e-3)
def test_unweighted_all_correct(self):
self.setup()
auc_obj = metrics.AUC()
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_true)
self.assertEqual(self.evaluate(result), 1)
def test_unweighted(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_manual_thresholds(self):
self.setup()
# Verify that when specified, thresholds are used instead of num_thresholds.
auc_obj = metrics.AUC(num_thresholds=2, thresholds=[0.5])
self.assertEqual(auc_obj.num_thresholds, 3)
self.assertAllClose(auc_obj.thresholds, [0.0, 0.5, 1.0])
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred)
# tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
# recall = [2/2, 1/(1+1), 0] = [1, 0.5, 0]
# fp_rate = [2/2, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.5)/2, (0.5 + 0)/2] = [0.75, 0.25]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.75 * 1 + 0.25 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds)
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [(1 + 0.571)/2, (0.571 + 0)/2] = [0.7855, 0.2855]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.7855 * 1 + 0.2855 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [max(1, 0.571), max(0.571, 0)] = [1, 0.571]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (1 * 1 + 0.571 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_roc_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds, summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# fp_rate = [3/3, 0, 0] = [1, 0, 0]
# heights = [min(1, 0.571), min(0.571, 0)] = [0.571, 0]
# widths = [(1 - 0), (0 - 0)] = [1, 0]
expected_result = (0.571 * 1 + 0 * 0)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_majoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='majoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [max(0.7, 1), max(1, 0)] = [1, 1]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (1 * 0.429 + 1 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_minoring(self):
self.setup()
auc_obj = metrics.AUC(
num_thresholds=self.num_thresholds,
curve='PR',
summation_method='minoring')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# precision = [7/(7+3), 4/4, 0] = [0.7, 1, 0]
# recall = [7/7, 4/(4+3), 0] = [1, 0.571, 0]
# heights = [min(0.7, 1), min(1, 0)] = [0.7, 0]
# widths = [(1 - 0.571), (0.571 - 0)] = [0.429, 0.571]
expected_result = (0.7 * 0.429 + 0 * 0.571)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_weighted_pr_interpolation(self):
self.setup()
auc_obj = metrics.AUC(num_thresholds=self.num_thresholds, curve='PR')
self.evaluate(variables.variables_initializer(auc_obj.variables))
result = auc_obj(self.y_true, self.y_pred, sample_weight=self.sample_weight)
    # auc = (slope / Total Pos) * [dTP + intercept * log(Pb/Pa)]
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
# P = tp + fp = [10, 4, 0]
# dTP = [7-4, 4-0] = [3, 4]
# dP = [10-4, 4-0] = [6, 4]
# slope = dTP/dP = [0.5, 1]
    # intercept = TPa - (slope * Pa) = [(4 - 0.5*4), (0 - 1*0)] = [2, 0]
# (Pb/Pa) = (Pb/Pa) if Pb > 0 AND Pa > 0 else 1 = [10/4, 4/0] = [2.5, 1]
# auc * TotalPos = [(0.5 * (3 + 2 * log(2.5))), (1 * (4 + 0))]
# = [2.416, 4]
# auc = [2.416, 4]/(tp[1:]+fn[1:])
expected_result = (2.416/7 + 4/7)
self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=-1)
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
metrics.AUC(num_thresholds=1)
def test_invalid_curve(self):
with self.assertRaisesRegexp(ValueError,
'Invalid AUC curve value "Invalid".'):
metrics.AUC(curve='Invalid')
def test_invalid_summation_method(self):
with self.assertRaisesRegexp(
ValueError, 'Invalid AUC summation method value "Invalid".'):
metrics.AUC(summation_method='Invalid')
if __name__ == '__main__':
test.main()
| chemelnucfin/tensorflow | tensorflow/python/keras/metrics_confusion_matrix_test.py | Python | apache-2.0 | 51,243 | 0.002381 |
import sys
from traceback import format_exception
import colorama
def color(color_, settings):
"""Utility for ability to disabling colored output."""
if settings.no_colors:
return ''
else:
return color_
def exception(title, exc_info, settings):
sys.stderr.write(
u'{warn}[WARN] {title}:{reset}\n{trace}'
u'{warn}----------------------------{reset}\n\n'.format(
warn=color(colorama.Back.RED + colorama.Fore.WHITE
+ colorama.Style.BRIGHT, settings),
reset=color(colorama.Style.RESET_ALL, settings),
title=title,
trace=''.join(format_exception(*exc_info))))
def rule_failed(rule, exc_info, settings):
exception('Rule {}'.format(rule.name), exc_info, settings)
def show_command(new_command, settings):
sys.stderr.write('{bold}{command}{reset}\n'.format(
command=new_command,
bold=color(colorama.Style.BRIGHT, settings),
reset=color(colorama.Style.RESET_ALL, settings)))
def confirm_command(new_command, settings):
sys.stderr.write(
'{bold}{command}{reset} [{green}enter{reset}/{red}ctrl+c{reset}]'.format(
command=new_command,
bold=color(colorama.Style.BRIGHT, settings),
green=color(colorama.Fore.GREEN, settings),
red=color(colorama.Fore.RED, settings),
reset=color(colorama.Style.RESET_ALL, settings)))
sys.stderr.flush()
def failed(msg, settings):
sys.stderr.write('{red}{msg}{reset}\n'.format(
msg=msg,
red=color(colorama.Fore.RED, settings),
reset=color(colorama.Style.RESET_ALL, settings)))
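# A minimal usage sketch (an illustrative addition, not part of the original
# module): with `no_colors` set on the settings object, color() returns '' and
# the helpers above emit plain, uncolored output. `_FakeSettings` is a
# stand-in for thefuck's real settings object, used here only for the demo.
def _example_plain_output():
    class _FakeSettings(object):
        no_colors = True
    settings = _FakeSettings()
    show_command('git push --force', settings)
    failed('No rule matched', settings)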
| JianfengYao/thefuck | thefuck/logs.py | Python | mit | 1,661 | 0.000602 |
"""This module is responsible for storing files on disk.
The storage strategy is as follows:
- Files themselves are stored in a separate directory called 'blobs'.
- Stored files are named by their SHA256 hashes (in hex).
- Stored files are grouped into directories by their first byte (two hex
characters), referred to as 'prefix'.
- To minimize disk usage, duplicate files are only stored once.
- All blobs are stored compressed (gzip).
- A directory tree is maintained with symlinks that mirror the logical
file naming and hierarchy.
- Symlinks are created and deleted by the server as needed, and they
have their own modification time ("version") different from the
modification time of the blob.
- Accesses to links and blobs are protected by separate fcntl locks
to avoid concurrent modification.
- Additional metadata about blobs is stored in a BSDDB kv-store.
  - The metadata currently stored is the symlink count and the
    decompressed ("logical") size.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import email.utils
import errno
import fcntl
import gevent
import gzip
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import bsddb3
import six
from filetracker.utils import file_digest
_LOCK_RETRIES = 20
_LOCK_SLEEP_TIME_S = 1
logger = logging.getLogger(__name__)
class FiletrackerFileNotFoundError(Exception):
pass
class ConcurrentModificationError(Exception):
"""Raised after acquiring lock failed multiple times."""
def __init__(self, lock_name):
message = 'Failed to acquire lock: {}'.format(lock_name)
        super(ConcurrentModificationError, self).__init__(message)
class FileStorage(object):
"""Manages the whole file storage."""
def __init__(self, base_dir):
self.base_dir = base_dir
self.blobs_dir = os.path.join(base_dir, 'blobs')
self.links_dir = os.path.join(base_dir, 'links')
self.locks_dir = os.path.join(base_dir, 'locks')
self.db_dir = os.path.join(base_dir, 'db')
_makedirs(self.blobs_dir)
_makedirs(self.links_dir)
_makedirs(self.locks_dir)
_makedirs(self.db_dir)
# https://docs.oracle.com/cd/E17076_05/html/programmer_reference/transapp_env_open.html
self.db_env = bsddb3.db.DBEnv()
try:
self.db_env.open(
self.db_dir,
bsddb3.db.DB_CREATE
| bsddb3.db.DB_INIT_LOCK
| bsddb3.db.DB_INIT_LOG
| bsddb3.db.DB_INIT_MPOOL
| bsddb3.db.DB_INIT_TXN
| bsddb3.db.DB_REGISTER,
)
except bsddb3.db.DBRunRecoveryError:
raise RuntimeError(
'DB requires recovery! It should have run in .run.main...'
)
self.db = bsddb3.db.DB(self.db_env)
self.db.open(
'metadata',
dbtype=bsddb3.db.DB_HASH,
flags=bsddb3.db.DB_CREATE | bsddb3.db.DB_AUTO_COMMIT,
)
def __del__(self):
self.db.close()
self.db_env.close()
def store(
self,
name,
data,
version,
size=0,
compressed=False,
digest=None,
logical_size=None,
):
"""Adds a new file to the storage.
If the file with the same name existed before, it's not
guaranteed that the link for the old version will exist until
the operation completes, but it's guaranteed that the link
will never point to an invalid blob.
Args:
name: name of the file being stored.
May contain slashes that are treated as path separators.
data: binary file-like object with file contents.
Files with unknown length are supported for compatibility with
WSGI interface: ``size`` parameter should be passed in these
cases.
version: new file "version"
Link modification time will be set to this timestamp. If
the link exists, and its modification time is higher, the
file is not overwritten.
size: length of ``data`` in bytes
If not 0, this takes priority over internal ``data`` size.
compressed: whether ``data`` is gzip-compressed
If True, the compression is skipped, and file is written as-is.
Note that the current server implementation sends
'Content-Encoding' header anyway, mandating client to
decompress the file.
digest: SHA256 digest of the file before compression
If specified, the digest will not be computed again, saving
resources.
logical_size: if ``data`` is gzip-compressed, this parameter
has to be set to decompressed file size.
"""
with _exclusive_lock(self._lock_path('links', name)):
logger.debug('Acquired lock to link for %s.', name)
link_path = self._link_path(name)
if _path_exists(link_path) and _file_version(link_path) > version:
logger.info(
'Tried to store older version of %s (%d < %d), ignoring.',
name,
version,
_file_version(link_path),
)
return _file_version(link_path)
# data is managed by contents now, and shouldn't be used directly
with _InputStreamWrapper(data, size) as contents:
if digest is None or logical_size is None:
contents.save()
if compressed:
# This shouldn't occur if the request came from a proper
# filetracker client, so we don't care if it's slow.
logger.warning('Storing compressed stream without hints.')
with gzip.open(contents.current_path, 'rb') as decompressed:
digest = file_digest(decompressed)
with gzip.open(contents.current_path, 'rb') as decompressed:
logical_size = _read_stream_for_size(decompressed)
else:
digest = file_digest(contents.current_path)
logical_size = os.stat(contents.current_path).st_size
blob_path = self._blob_path(digest)
with _exclusive_lock(self._lock_path('blobs', digest)):
logger.debug('Acquired lock for blob %s.', digest)
digest_bytes = digest.encode()
with self._db_transaction() as txn:
logger.debug('Started DB transaction (adding link).')
link_count = int(self.db.get(digest_bytes, 0, txn=txn))
new_count = str(link_count + 1).encode()
self.db.put(digest_bytes, new_count, txn=txn)
if link_count == 0:
self.db.put(
'{}:logical_size'.format(digest).encode(),
str(logical_size).encode(),
txn=txn,
)
                    logger.debug('Committing DB transaction (adding link).')
logger.debug('Committed DB transaction (adding link).')
# Create a new blob if this isn't a duplicate.
if link_count == 0:
logger.debug('Creating new blob.')
_create_file_dirs(blob_path)
if compressed:
contents.save(blob_path)
else:
contents.save()
with open(contents.current_path, 'rb') as raw, gzip.open(
blob_path, 'wb'
) as blob:
shutil.copyfileobj(raw, blob)
logger.debug('Released lock for blob %s.', digest)
if _path_exists(link_path):
# Lend the link lock to delete().
# Note that DB lock has to be released in advance, otherwise
# deadlock is possible in concurrent scenarios.
logger.info('Overwriting existing link %s.', name)
self.delete(name, version, _lock=False)
_create_file_dirs(link_path)
rel_blob_path = os.path.relpath(blob_path, os.path.dirname(link_path))
os.symlink(rel_blob_path, link_path)
logger.debug('Created link %s.', name)
lutime(link_path, version)
return version
logger.debug('Released lock for link %s.', name)
def delete(self, name, version, _lock=True):
"""Removes a file from the storage.
Args:
name: name of the file being deleted.
May contain slashes that are treated as path separators.
version: file "version" that is meant to be deleted
If the file that is stored has newer version than provided,
it will not be deleted.
            _lock: whether or not to acquire locks
This is for internal use only,
normal users should always leave it set to True.
Returns whether or not the file has been deleted.
"""
link_path = self._link_path(name)
if _lock:
file_lock = _exclusive_lock(self._lock_path('links', name))
else:
file_lock = _no_lock()
with file_lock:
logger.debug('Acquired or inherited lock for link %s.', name)
if not _path_exists(link_path):
raise FiletrackerFileNotFoundError
if _file_version(link_path) > version:
logger.info(
                    'Tried to delete older version of %s (%d < %d), ignoring.',
name,
version,
_file_version(link_path),
)
return False
digest = self._digest_for_link(name)
with _exclusive_lock(self._lock_path('blobs', digest)):
logger.debug('Acquired lock for blob %s.', digest)
should_delete_blob = False
with self._db_transaction() as txn:
logger.debug('Started DB transaction (deleting link).')
digest_bytes = digest.encode()
link_count = self.db.get(digest_bytes, txn=txn)
if link_count is None:
raise RuntimeError("File exists but has no key in db")
link_count = int(link_count)
if link_count == 1:
logger.debug('Deleting last link to blob %s.', digest)
self.db.delete(digest_bytes, txn=txn)
self.db.delete(
'{}:logical_size'.format(digest).encode(), txn=txn
)
should_delete_blob = True
else:
new_count = str(link_count - 1).encode()
self.db.put(digest_bytes, new_count, txn=txn)
logger.debug('Committing DB transaction (deleting link).')
logger.debug('Committed DB transaction (deleting link).')
os.unlink(link_path)
logger.debug('Deleted link %s.', name)
if should_delete_blob:
os.unlink(self._blob_path(digest))
logger.debug('Released lock for blob %s.', digest)
logger.debug('Released (or gave back) lock for link %s.', name)
return True
def stored_version(self, name):
"""Returns the version of file `name` or None if it doesn't exist."""
link_path = self._link_path(name)
if not _path_exists(link_path):
return None
return _file_version(link_path)
def logical_size(self, name):
"""Returns the logical size (before compression) of file `name`."""
digest = self._digest_for_link(name)
logical_size = self.db.get('{}:logical_size'.format(digest).encode())
if logical_size:
return int(logical_size.decode())
else:
raise RuntimeError('Blob doesn\'t have :logical_size in DB: try recovering')
def _link_path(self, name):
return os.path.join(self.links_dir, name)
def _blob_path(self, digest):
return os.path.join(self.blobs_dir, digest[0:2], digest)
def _lock_path(self, *path_parts):
return os.path.join(self.locks_dir, *path_parts)
@contextlib.contextmanager
def _db_transaction(self):
txn = self.db_env.txn_begin()
try:
yield txn
except:
txn.abort()
raise
else:
txn.commit()
def _digest_for_link(self, name):
link = self._link_path(name)
blob_path = os.readlink(link)
digest = os.path.basename(blob_path)
return digest
class _InputStreamWrapper(object):
"""A wrapper for lazy reading and moving contents of 'wsgi.input'.
Should be used as a context manager.
"""
def __init__(self, data, size):
self._data = data
self._size = size
self.current_path = None
self.saved_in_temp = False
def __enter__(self):
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
"""Removes file if it was last saved as a temporary file."""
if self.saved_in_temp:
os.unlink(self.current_path)
def save(self, new_path=None):
"""Moves or creates the file with stream contents to a new location.
Args:
new_path: path to move to, if None a temporary file is created.
"""
self.saved_in_temp = new_path is None
if new_path is None:
fd, new_path = tempfile.mkstemp()
os.close(fd)
if self.current_path:
shutil.move(self.current_path, new_path)
else:
with open(new_path, 'wb') as dest:
_copy_stream(self._data, dest, self._size)
self.current_path = new_path
_BUFFER_SIZE = 64 * 1024
def _copy_stream(src, dest, length=0):
"""Similar to shutil.copyfileobj, but supports limiting data size.
As for why this is required, refer to
https://www.python.org/dev/peps/pep-0333/#input-and-error-streams
Yes, there are WSGI implementations which do not support EOFs, and
believe me, you don't want to debug this.
Args:
src: source file-like object
dest: destination file-like object
length: optional file size hint
If not 0, exactly length bytes will be written.
If 0, write will continue until EOF is encountered.
"""
if length == 0:
shutil.copyfileobj(src, dest)
return
    bytes_left = length
    while bytes_left > 0:
        buf_size = min(_BUFFER_SIZE, bytes_left)
        buf = src.read(buf_size)
        if not buf:
            # Premature EOF: stop instead of miscounting the remaining bytes.
            break
        dest.write(buf)
        bytes_left -= len(buf)
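def _copy_stream_example():
    # Illustrative sketch only (not part of the original module): copies
    # exactly 100000 bytes even though the source stream holds more,
    # mirroring how request bodies are drained by the callers above.
    import io
    src = io.BytesIO(b'x' * 200000)
    dest = io.BytesIO()
    _copy_stream(src, dest, length=100000)
    assert dest.tell() == 100000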
def _read_stream_for_size(stream):
"""Reads a stream discarding the data read and returns its size."""
size = 0
while True:
buf = stream.read(_BUFFER_SIZE)
size += len(buf)
if not buf:
break
return size
def _create_file_dirs(file_path):
"""Creates directory tree to file if it doesn't exist."""
dir_name = os.path.dirname(file_path)
_makedirs(dir_name)
def _path_exists(path):
"""Checks if the path exists
- is a file, a directory or a symbolic link that may be broken."""
return os.path.exists(path) or os.path.islink(path)
def _file_version(path):
return os.lstat(path).st_mtime
@contextlib.contextmanager
def _exclusive_lock(path):
"""A simple wrapper for fcntl exclusive lock."""
_create_file_dirs(path)
fd = os.open(path, os.O_WRONLY | os.O_CREAT, 0o600)
try:
retries_left = _LOCK_RETRIES
success = False
while retries_left > 0:
# try to acquire the lock in a loop
# because gevent doesn't treat flock as IO,
# so waiting here without yielding would get the worker killed
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
success = True
break
except IOError as e:
if e.errno in [errno.EAGAIN, errno.EWOULDBLOCK]:
# This yields execution to other green threads.
gevent.sleep(_LOCK_SLEEP_TIME_S)
retries_left -= 1
else:
raise
if success:
yield
else:
raise ConcurrentModificationError(path)
finally:
if success:
fcntl.flock(fd, fcntl.LOCK_UN)
os.close(fd)
@contextlib.contextmanager
def _no_lock():
"""Does nothing, just runs the code within the `with` statement.
Used for conditional locking."""
yield
def _makedirs(path):
"""A py2 wrapper for os.makedirs() that simulates exist_ok=True flag."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def lutime(path, time):
if six.PY2:
t = email.utils.formatdate(time)
if subprocess.call(['touch', '-c', '-h', '-d', t, path]) != 0:
raise RuntimeError
else:
os.utime(path, (time, time), follow_symlinks=False)
| sio2project/filetracker | filetracker/servers/storage.py | Python | gpl-3.0 | 17,784 | 0.00045 |
import pytest
from indy import IndyError
from indy import did
from indy import wallet
from indy.error import ErrorCode
@pytest.mark.asyncio
@pytest.mark.parametrize("wallet_handle_cleanup", [False])
async def test_import_wallet_works(wallet_handle, wallet_config, credentials, export_config):
(_did, _verkey) = await did.create_and_store_my_did(wallet_handle, "{}")
await did.set_did_metadata(wallet_handle, _did, "metadata")
did_with_meta_before = await did.get_my_did_with_meta(wallet_handle, _did)
await wallet.export_wallet(wallet_handle, export_config)
await wallet.close_wallet(wallet_handle)
await wallet.delete_wallet(wallet_config, credentials)
await wallet.import_wallet(wallet_config, credentials, export_config)
wallet_handle = await wallet.open_wallet(wallet_config, credentials)
did_with_meta_after = await did.get_my_did_with_meta(wallet_handle, _did)
assert did_with_meta_before == did_with_meta_after
await wallet.close_wallet(wallet_handle)
@pytest.mark.asyncio
async def test_import_wallet_works_for_not_exist_path(wallet_config, credentials, export_config):
with pytest.raises(IndyError) as e:
await wallet.import_wallet(wallet_config, credentials, export_config)
assert ErrorCode.CommonIOError == e.value.error_code
| srottem/indy-sdk | wrappers/python/tests/wallet/test_import_wallet.py | Python | apache-2.0 | 1,307 | 0.00153 |
try:
import json as _json
except ImportError:
import sys
sys.path.append("simplejson-2.3.3")
import simplejson as _json
import requests as _requests
import urllib.parse as _urlparse
import random as _random
import base64 as _base64
from configparser import ConfigParser as _ConfigParser
import os as _os
_CT = "content-type"
_AJ = "application/json"
_URL_SCHEME = frozenset(["http", "https"])
def _get_token(
user_id,
password,
auth_svc="https://nexus.api.globusonline.org/goauth/token?"
+ "grant_type=client_credentials",
):
    # This is a bandaid helper function until we get a full
    # KBase python auth client released
    auth = _base64.b64encode((user_id + ":" + password).encode("utf-8")).decode("ascii")
headers = {"Authorization": "Basic " + auth}
ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
status = ret.status_code
if status >= 200 and status <= 299:
tok = _json.loads(ret.text)
elif status == 403:
raise Exception(
"Authentication failed: Bad user_id/password "
+ "combination for user %s" % (user_id)
)
else:
raise Exception(ret.text)
return tok["access_token"]
def _read_rcfile(file=_os.environ["HOME"] + "/.authrc"): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if _os.path.exists(file):
try:
with open(file) as authrc:
rawdata = _json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {
x: rawdata.get(x)
for x in (
"user_id",
"token",
"client_secret",
"keyfile",
"keyfile_passphrase",
"password",
)
}
except Exception as e:
print("Error while reading authrc file %s: %s" % (file, e))
return authdata
def _read_inifile(
file=_os.environ.get( # @ReservedAssignment
"KB_DEPLOYMENT_CONFIG", _os.environ["HOME"] + "/.kbase_config"
)
):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if _os.path.exists(file):
try:
config = _ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {
x: config.get("authentication", x)
if config.has_option("authentication", x)
else None
for x in (
"user_id",
"token",
"client_secret",
"keyfile",
"keyfile_passphrase",
"password",
)
}
except Exception as e:
print("Error while reading INI file %s: %s" % (file, e))
return authdata
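# Illustrative sketch of the INI layout that _read_inifile() expects (values
# are placeholders, not real credentials; the file normally lives at
# ~/.kbase_config or wherever KB_DEPLOYMENT_CONFIG points):
#
#   [authentication]
#   user_id = someuser
#   token = <auth token>
#   # alternatively: password = <password>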
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = "" if message is None else message
self.data = data or error or ""
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return (
self.name + ": " + str(self.code) + ". " + self.message + "\n" + self.data
)
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class Client(object):
def __init__(
self,
url=None,
timeout=30 * 60,
user_id=None,
password=None,
token=None,
ignore_authrc=False,
trust_all_ssl_certificates=False,
use_url_lookup=True,
):
if url is None:
raise ValueError("A url is required")
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
self.use_url_lookup = use_url_lookup
# token overrides user_id and password
if token is not None:
self._headers["AUTHORIZATION"] = token
elif user_id is not None and password is not None:
self._headers["AUTHORIZATION"] = _get_token(user_id, password)
elif "KB_AUTH_TOKEN" in _os.environ:
self._headers["AUTHORIZATION"] = _os.environ.get("KB_AUTH_TOKEN")
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get("token") is not None:
self._headers["AUTHORIZATION"] = authdata["token"]
elif (
authdata.get("user_id") is not None
and authdata.get("password") is not None
):
self._headers["AUTHORIZATION"] = _get_token(
authdata["user_id"], authdata["password"]
)
if self.timeout < 1:
raise ValueError("Timeout value must be at least 1 second")
def _call(self, url, method, params, json_rpc_context=None):
arg_hash = {
"method": method,
"params": params,
"version": "1.1",
"id": str(_random.random())[2:],
}
if json_rpc_context:
arg_hash["context"] = json_rpc_context
body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
ret = _requests.post(
url,
data=body,
headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates,
)
if ret.status_code == _requests.codes.server_error:
if _CT in ret.headers and ret.headers[_CT] == _AJ:
err = _json.loads(ret.text)
if "error" in err:
raise ServerError(**err["error"])
else:
raise ServerError("Unknown", 0, ret.text)
else:
raise ServerError("Unknown", 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
ret.encoding = "utf-8"
resp = _json.loads(ret.text)
if "result" not in resp:
raise ServerError("Unknown", 0, "An unknown server error occurred")
return resp["result"]
def sync_call(
self, service_method, param_list, service_version=None, json_rpc_context=None
):
if json_rpc_context and not isinstance(json_rpc_context, dict):
raise ValueError(
"Method send_data: argument json_rpc_context is not type dict as required."
)
url = self.url
if self.use_url_lookup:
module_name = service_method.split(".")[0]
service_status_ret = self._call(
self.url,
"ServiceWizard.get_service_status",
[{"module_name": module_name, "version": service_version}],
None,
)[0]
url = service_status_ret["url"]
return self._call(url, service_method, param_list, json_rpc_context)
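def _example_usage():
    # Illustrative sketch only (not part of the original module). The URL,
    # token and method name below are placeholders; a real deployment
    # supplies its own service-wizard endpoint and credentials.
    client = Client(
        url="https://kbase.example.org/services/service_wizard",
        token="<auth token>",
        use_url_lookup=True,
    )
    # Resolves the module's URL via ServiceWizard.get_service_status, then
    # issues the JSON-RPC call itself.
    return client.sync_call("SomeModule.some_method", [{"param": 1}])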
| kbase/narrative | src/biokbase/service/Client.py | Python | mit | 7,649 | 0.000392 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'matelot' et ses sous-commandes.
Dans ce fichier se trouve la commande même.
"""
from primaires.interpreteur.commande.commande import Commande
from .affecter import PrmAffecter
from .creer import PrmCreer
from .editer import PrmEditer
from .info import PrmInfo
from .liste import PrmListe
from .poste import PrmPoste
from .promouvoir import PrmPromouvoir
from .recruter import PrmRecruter
from .renommer import PrmRenommer
from .retirer import PrmRetirer
from .score import PrmScore
class CmdMatelot(Commande):
"""Commande 'matelot'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "matelot", "seaman")
self.nom_categorie = "navire"
self.aide_courte = "manipulation des matelots"
self.aide_longue = \
"Cette commande permet de manipuler les matelots de " \
"votre équipage individuellement. Il existe également " \
"la commande %équipage% qui permet de manipuler l'équipage " \
"d'un coup d'un seul."
def ajouter_parametres(self):
"""Ajout des paramètres"""
self.ajouter_parametre(PrmAffecter())
self.ajouter_parametre(PrmCreer())
self.ajouter_parametre(PrmEditer())
self.ajouter_parametre(PrmInfo())
self.ajouter_parametre(PrmListe())
self.ajouter_parametre(PrmPoste())
self.ajouter_parametre(PrmPromouvoir())
self.ajouter_parametre(PrmRecruter())
self.ajouter_parametre(PrmRenommer())
self.ajouter_parametre(PrmRetirer())
self.ajouter_parametre(PrmScore())
| vlegoff/tsunami | src/secondaires/navigation/commandes/matelot/__init__.py | Python | bsd-3-clause | 3,210 | 0.000312 |
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, laplace_transform, inverse_laplace_transform,
fourier_transform, inverse_fourier_transform,
sine_transform, inverse_sine_transform,
cosine_transform, inverse_cosine_transform,
hankel_transform, inverse_hankel_transform,
LaplaceTransform, FourierTransform, SineTransform, CosineTransform,
InverseLaplaceTransform, InverseFourierTransform, InverseSineTransform, InverseCosineTransform,
HankelTransform, InverseHankelTransform)
from sympy import (
gamma, exp, oo, Heaviside, symbols, Symbol, re, factorial, pi,
cos, S, And, sin, sqrt, I, log, tan, hyperexpand, meijerg,
EulerGamma, erf, besselj, bessely, besseli, besselk,
exp_polar, polar_lift, unpolarify, Function, expint, expand_mul,
combsimp, trigsimp)
from sympy.utilities.pytest import XFAIL, slow, skip
from sympy.matrices import Matrix, eye
from sympy.abc import x, s, a, b, c, d
nu, beta, rho = symbols('nu beta rho')
def test_undefined_function():
from sympy import Function, MellinTransform
f = Function('f')
assert mellin_transform(f(x), x, s) == MellinTransform(f(x), x, s)
assert mellin_transform(f(x) + exp(-x), x, s) == \
(MellinTransform(f(x), x, s) + gamma(s), (0, oo), True)
assert laplace_transform(2*f(x), x, s) == 2*LaplaceTransform(f(x), x, s)
# TODO test derivative and other rules when implemented
def test_free_symbols():
from sympy import Function
f = Function('f')
assert mellin_transform(f(x), x, s).free_symbols == set([s])
assert mellin_transform(f(x)*a, x, s).free_symbols == set([s, a])
def test_as_integral():
from sympy import Function, Integral
f = Function('f')
assert mellin_transform(f(x), x, s).rewrite('Integral') == \
Integral(x**(s - 1)*f(x), (x, 0, oo))
assert fourier_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-2*I*pi*s*x), (x, -oo, oo))
assert laplace_transform(f(x), x, s).rewrite('Integral') == \
Integral(f(x)*exp(-s*x), (x, 0, oo))
assert str(inverse_mellin_transform(f(s), s, x, (a, b)).rewrite('Integral')) \
== "Integral(x**(-s)*f(s), (s, _c - oo*I, _c + oo*I))"
assert str(inverse_laplace_transform(f(s), s, x).rewrite('Integral')) == \
"Integral(f(s)*exp(s*x), (s, _c - oo*I, _c + oo*I))"
assert inverse_fourier_transform(f(s), s, x).rewrite('Integral') == \
Integral(f(s)*exp(2*I*pi*s*x), (s, -oo, oo))
# NOTE this is stuck in risch because meijerint cannot handle it
@slow
@XFAIL
def test_mellin_transform_fail():
skip("Risch takes forever.")
from sympy import Max, Min
MT = mellin_transform
bpos = symbols('b', positive=True)
bneg = symbols('b', negative=True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
# TODO does not work with bneg, argument wrong. Needs changes to matching.
assert MT(expr.subs(b, -bpos), x, s) == \
((-1)**(a + 1)*2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(a + s)
*gamma(1 - a - 2*s)/gamma(1 - s),
(-re(a), -re(a)/2 + S(1)/2), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, -bpos), x, s) == \
(
2**(a + 2*s)*a*bpos**(a + 2*s)*gamma(-a - 2*
s)*gamma(a + s)/gamma(-s + 1),
(-re(a), -re(a)/2), True)
# Test exponent 1:
assert MT(expr.subs({b: -bpos, a: 1}), x, s) == \
(-bpos**(2*s + 1)*gamma(s)*gamma(-s - S(1)/2)/(2*sqrt(pi)),
(-1, -S(1)/2), True)
def test_mellin_transform():
from sympy import Max, Min, Ne
MT = mellin_transform
bpos = symbols('b', positive=True)
# 8.4.2
assert MT(x**nu*Heaviside(x - 1), x, s) == \
(-1/(nu + s), (-oo, -re(nu)), True)
assert MT(x**nu*Heaviside(1 - x), x, s) == \
(1/(nu + s), (-re(nu), oo), True)
assert MT((1 - x)**(beta - 1)*Heaviside(1 - x), x, s) == \
(gamma(beta)*gamma(s)/gamma(beta + s), (0, oo), re(-beta) < 0)
assert MT((x - 1)**(beta - 1)*Heaviside(x - 1), x, s) == \
(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
(-oo, -re(beta) + 1), re(-beta) < 0)
assert MT((1 + x)**(-rho), x, s) == \
(gamma(s)*gamma(rho - s)/gamma(rho), (0, re(rho)), True)
# TODO also the conditions should be simplified
assert MT(abs(1 - x)**(-rho), x, s) == (
cos(pi*(rho/2 - s))*gamma(s)*gamma(rho - s)/(cos(pi*rho/2)*gamma(rho)),
(0, re(rho)), And(re(rho) - 1 < 0, re(rho) < 1))
mt = MT((1 - x)**(beta - 1)*Heaviside(1 - x)
+ a*(x - 1)**(beta - 1)*Heaviside(x - 1), x, s)
    assert (mt[1], mt[2]) == ((0, -re(beta) + 1), True)
assert MT((x**a - b**a)/(x - b), x, s)[0] == \
pi*b**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s)))
assert MT((x**a - bpos**a)/(x - bpos), x, s) == \
(pi*bpos**(a + s - 1)*sin(pi*a)/(sin(pi*s)*sin(pi*(a + s))),
(Max(-re(a), 0), Min(1 - re(a), 1)), True)
expr = (sqrt(x + b**2) + b)**a
assert MT(expr.subs(b, bpos), x, s) == \
(-a*(2*bpos)**(a + 2*s)*gamma(s)*gamma(-a - 2*s)/gamma(-a - s + 1),
(0, -re(a)/2), True)
expr = (sqrt(x + b**2) + b)**a/sqrt(x + b**2)
assert MT(expr.subs(b, bpos), x, s) == \
(2**(a + 2*s)*bpos**(a + 2*s - 1)*gamma(s)
*gamma(1 - a - 2*s)/gamma(1 - a - s),
(0, -re(a)/2 + S(1)/2), True)
# 8.4.2
assert MT(exp(-x), x, s) == (gamma(s), (0, oo), True)
assert MT(exp(-1/x), x, s) == (gamma(-s), (-oo, 0), True)
# 8.4.5
assert MT(log(x)**4*Heaviside(1 - x), x, s) == (24/s**5, (0, oo), True)
assert MT(log(x)**3*Heaviside(x - 1), x, s) == (6/s**4, (-oo, 0), True)
assert MT(log(x + 1), x, s) == (pi/(s*sin(pi*s)), (-1, 0), True)
assert MT(log(1/x + 1), x, s) == (pi/(s*sin(pi*s)), (0, 1), True)
assert MT(log(abs(1 - x)), x, s) == (pi/(s*tan(pi*s)), (-1, 0), True)
assert MT(log(abs(1 - 1/x)), x, s) == (pi/(s*tan(pi*s)), (0, 1), True)
# TODO we cannot currently do these (needs summation of 3F2(-1))
# this also implies that they cannot be written as a single g-function
# (although this is possible)
mt = MT(log(x)/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)**2/(x + 1), x, s)
assert mt[1:] == ((0, 1), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
mt = MT(log(x)/(x + 1)**2, x, s)
assert mt[1:] == ((0, 2), True)
assert not hyperexpand(mt[0], allow_hyper=True).has(meijerg)
# 8.4.14
assert MT(erf(sqrt(x)), x, s) == \
(-gamma(s + S(1)/2)/(sqrt(pi)*s), (-S(1)/2, 0), True)
def test_mellin_transform_bessel():
from sympy import Max, Min, hyper, meijerg
MT = mellin_transform
# 8.4.19
assert MT(besselj(a, 2*sqrt(x)), x, s) == \
(gamma(a/2 + s)/gamma(a/2 - s + 1), (-re(a)/2, S(3)/4), True)
assert MT(sin(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(-2*s + S(1)/2)*gamma(a/2 + s + S(1)/2)/(
gamma(-a/2 - s + 1)*gamma(a - 2*s + 1)), (
-re(a)/2 - S(1)/2, S(1)/4), True)
assert MT(cos(sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(2**a*gamma(a/2 + s)*gamma(-2*s + S(1)/2)/(
gamma(-a/2 - s + S(1)/2)*gamma(a - 2*s + 1)), (
-re(a)/2, S(1)/4), True)
assert MT(besselj(a, sqrt(x))**2, x, s) == \
(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
(-re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(-a, sqrt(x)), x, s) == \
(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - a - s)*gamma(1 + a - s)),
(0, S(1)/2), True)
# NOTE: prudnikov gives the strip below as (1/2 - re(a), 1). As far as
# I can see this is wrong (since besselj(z) ~ 1/sqrt(z) for z large)
assert MT(besselj(a - 1, sqrt(x))*besselj(a, sqrt(x)), x, s) == \
(gamma(1 - s)*gamma(a + s - S(1)/2)
/ (sqrt(pi)*gamma(S(3)/2 - s)*gamma(a - s + S(1)/2)),
(S(1)/2 - re(a), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*besselj(b, sqrt(x)), x, s) == \
(4**s*gamma(1 - 2*s)*gamma((a + b)/2 + s)
/ (gamma(1 - s + (b - a)/2)*gamma(1 - s + (a - b)/2)
*gamma( 1 - s + (a + b)/2)),
(-(re(a) + re(b))/2, S(1)/2), True)
assert MT(besselj(a, sqrt(x))**2 + besselj(-a, sqrt(x))**2, x, s)[1:] == \
((Max(re(a), -re(a)), S(1)/2), True)
# Section 8.4.20
assert MT(bessely(a, 2*sqrt(x)), x, s) == \
(-cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)/pi,
(Max(-re(a)/2, re(a)/2), S(3)/4), True)
assert MT(sin(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*sin(pi*(a/2 - s))*gamma(S(1)/2 - 2*s)
* gamma((1 - a)/2 + s)*gamma((1 + a)/2 + s)
/ (sqrt(pi)*gamma(1 - s - a/2)*gamma(1 - s + a/2)),
(Max(-(re(a) + 1)/2, (re(a) - 1)/2), S(1)/4), True)
assert MT(cos(sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - s))*gamma(s - a/2)*gamma(s + a/2)*gamma(S(1)/2 - 2*s)
/ (sqrt(pi)*gamma(S(1)/2 - s - a/2)*gamma(S(1)/2 - s + a/2)),
(Max(-re(a)/2, re(a)/2), S(1)/4), True)
assert MT(besselj(a, sqrt(x))*bessely(a, sqrt(x)), x, s) == \
(-cos(pi*s)*gamma(s)*gamma(a + s)*gamma(S(1)/2 - s)
/ (pi**S('3/2')*gamma(1 + a - s)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besselj(a, sqrt(x))*bessely(b, sqrt(x)), x, s) == \
(-4**s*cos(pi*(a/2 - b/2 + s))*gamma(1 - 2*s)
* gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s)
/ (pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
(Max((-re(a) + re(b))/2, (-re(a) - re(b))/2), S(1)/2), True)
# NOTE bessely(a, sqrt(x))**2 and bessely(a, sqrt(x))*bessely(b, sqrt(x))
# are a mess (no matter what way you look at it ...)
assert MT(bessely(a, sqrt(x))**2, x, s)[1:] == \
((Max(-re(a), 0, re(a)), S(1)/2), True)
# Section 8.4.22
# TODO we can't do any of these (delicate cancellation)
# Section 8.4.23
assert MT(besselk(a, 2*sqrt(x)), x, s) == \
(gamma(
s - a/2)*gamma(s + a/2)/2, (Max(-re(a)/2, re(a)/2), oo), True)
assert MT(besselj(a, 2*sqrt(2*sqrt(x)))*besselk(
a, 2*sqrt(2*sqrt(x))), x, s) == (4**(-s)*gamma(2*s)*
gamma(a/2 + s)/(2*gamma(a/2 - s + 1)), (Max(0, -re(a)/2), oo), True)
# TODO bessely(a, x)*besselk(a, x) is a mess
assert MT(besseli(a, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(gamma(s)*gamma(
a + s)*gamma(-s + S(1)/2)/(2*sqrt(pi)*gamma(a - s + 1)),
(Max(-re(a), 0), S(1)/2), True)
assert MT(besseli(b, sqrt(x))*besselk(a, sqrt(x)), x, s) == \
(2**(2*s - 1)*gamma(-2*s + 1)*gamma(-a/2 + b/2 + s)* \
gamma(a/2 + b/2 + s)/(gamma(-a/2 + b/2 - s + 1)* \
gamma(a/2 + b/2 - s + 1)), (Max(-re(a)/2 - re(b)/2, \
re(a)/2 - re(b)/2), S(1)/2), True)
# TODO products of besselk are a mess
mt = MT(exp(-x/2)*besselk(a, x/2), x, s)
mt0 = combsimp((trigsimp(combsimp(mt[0].expand(func=True)))))
assert mt0 == 2*pi**(S(3)/2)*cos(pi*s)*gamma(-s + S(1)/2)/(
(cos(2*pi*a) - cos(2*pi*s))*gamma(-a - s + 1)*gamma(a - s + 1))
assert mt[1:] == ((Max(-re(a), re(a)), oo), True)
# TODO exp(x/2)*besselk(a, x/2) [etc] cannot currently be done
# TODO various strange products of special orders
def test_expint():
from sympy import E1, expint, Max, re, lerchphi, Symbol, simplify, Si, Ci, Ei
aneg = Symbol('a', negative=True)
u = Symbol('u', polar=True)
assert mellin_transform(E1(x), x, s) == (gamma(s)/s, (0, oo), True)
assert inverse_mellin_transform(gamma(s)/s, s, x,
(0, oo)).rewrite(expint).expand() == E1(x)
assert mellin_transform(expint(a, x), x, s) == \
(gamma(s)/(a + s - 1), (Max(1 - re(a), 0), oo), True)
# XXX IMT has hickups with complicated strips ...
assert simplify(unpolarify(
inverse_mellin_transform(gamma(s)/(aneg + s - 1), s, x,
(1 - aneg, oo)).rewrite(expint).expand(func=True))) == \
expint(aneg, x)
assert mellin_transform(Si(x), x, s) == \
(-2**s*sqrt(pi)*gamma(s/2 + S(1)/2)/(
2*s*gamma(-s/2 + 1)), (-1, 0), True)
assert inverse_mellin_transform(-2**s*sqrt(pi)*gamma((s + 1)/2)
/(2*s*gamma(-s/2 + 1)), s, x, (-1, 0)) \
== Si(x)
assert mellin_transform(Ci(sqrt(x)), x, s) == \
(-2**(2*s - 1)*sqrt(pi)*gamma(s)/(s*gamma(-s + S(1)/2)), (0, 1), True)
assert inverse_mellin_transform(
-4**s*sqrt(pi)*gamma(s)/(2*s*gamma(-s + S(1)/2)),
s, u, (0, 1)).expand() == Ci(sqrt(u))
# TODO LT of Si, Shi, Chi is a mess ...
assert laplace_transform(Ci(x), x, s) == (-log(1 + s**2)/2/s, 0, True)
assert laplace_transform(expint(a, x), x, s) == \
(lerchphi(s*polar_lift(-1), 1, a), 0, S(0) < re(a))
assert laplace_transform(expint(1, x), x, s) == (log(s + 1)/s, 0, True)
assert laplace_transform(expint(2, x), x, s) == \
((s - log(s + 1))/s**2, 0, True)
assert inverse_laplace_transform(-log(1 + s**2)/2/s, s, u).expand() == \
Heaviside(u)*Ci(u)
assert inverse_laplace_transform(log(s + 1)/s, s, x).rewrite(expint) == \
Heaviside(x)*E1(x)
assert inverse_laplace_transform((s - log(s + 1))/s**2, s,
x).rewrite(expint).expand() == \
(expint(2, x)*Heaviside(x)).rewrite(Ei).rewrite(expint).expand()
def test_inverse_mellin_transform():
from sympy import (sin, simplify, expand_func, powsimp, Max, Min, expand,
                       powdenest, exp_polar, combsimp, cos, cot)
IMT = inverse_mellin_transform
assert IMT(gamma(s), s, x, (0, oo)) == exp(-x)
assert IMT(gamma(-s), s, x, (-oo, 0)) == exp(-1/x)
assert simplify(IMT(s/(2*s**2 - 2), s, x, (2, oo))) == \
(x**2 + 1)*Heaviside(1 - x)/(4*x)
# test passing "None"
assert IMT(1/(s**2 - 1), s, x, (-1, None)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
assert IMT(1/(s**2 - 1), s, x, (None, 1)) == \
-x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x)
# test expansion of sums
assert IMT(gamma(s) + gamma(s - 1), s, x, (1, oo)) == (x + 1)*exp(-x)/x
# test factorisation of polys
r = symbols('r', real=True)
assert IMT(1/(s**2 + 1), s, exp(-x), (None, oo)
).subs(x, r).rewrite(sin).simplify() \
== sin(r)*Heaviside(1 - exp(-r))
# test multiplicative substitution
_a, _b = symbols('a b', positive=True)
assert IMT(_b**(-s/_a)*factorial(s/_a)/s, s, x, (0, oo)) == exp(-_b*x**_a)
assert IMT(factorial(_a/_b + s/_b)/(_a + s), s, x, (-_a, oo)) == x**_a*exp(-x**_b)
def simp_pows(expr):
return simplify(powsimp(expand_mul(expr, deep=False), force=True)).replace(exp_polar, exp)
# Now test the inverses of all direct transforms tested above
# Section 8.4.2
nu = symbols('nu', real=True, finite=True)
assert IMT(-1/(nu + s), s, x, (-oo, None)) == x**nu*Heaviside(x - 1)
assert IMT(1/(nu + s), s, x, (None, oo)) == x**nu*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(s)/gamma(s + beta), s, x, (0, oo))) \
== (1 - x)**(beta - 1)*Heaviside(1 - x)
assert simp_pows(IMT(gamma(beta)*gamma(1 - beta - s)/gamma(1 - s),
s, x, (-oo, None))) \
== (x - 1)**(beta - 1)*Heaviside(x - 1)
assert simp_pows(IMT(gamma(s)*gamma(rho - s)/gamma(rho), s, x, (0, None))) \
== (1/(x + 1))**rho
assert simp_pows(IMT(d**c*d**(s - 1)*sin(pi*c)
*gamma(s)*gamma(s + c)*gamma(1 - s)*gamma(1 - s - c)/pi,
s, x, (Max(-re(c), 0), Min(1 - re(c), 1)))) \
== (x**c - d**c)/(x - d)
assert simplify(IMT(1/sqrt(pi)*(-c/2)*gamma(s)*gamma((1 - c)/2 - s)
*gamma(-c/2 - s)/gamma(1 - c - s),
s, x, (0, -re(c)/2))) == \
(1 + sqrt(x + 1))**c
assert simplify(IMT(2**(a + 2*s)*b**(a + 2*s - 1)*gamma(s)*gamma(1 - a - 2*s)
/gamma(1 - a - s), s, x, (0, (-re(a) + 1)/2))) == \
b**(a - 1)*(sqrt(1 + x/b**2) + 1)**(a - 1)*(b**2*sqrt(1 + x/b**2) +
b**2 + x)/(b**2 + x)
assert simplify(IMT(-2**(c + 2*s)*c*b**(c + 2*s)*gamma(s)*gamma(-c - 2*s)
/ gamma(-c - s + 1), s, x, (0, -re(c)/2))) == \
b**c*(sqrt(1 + x/b**2) + 1)**c
# Section 8.4.5
assert IMT(24/s**5, s, x, (0, oo)) == log(x)**4*Heaviside(1 - x)
assert expand(IMT(6/s**4, s, x, (-oo, 0)), force=True) == \
log(x)**3*Heaviside(x - 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (-1, 0)) == log(x + 1)
assert IMT(pi/(s*sin(pi*s/2)), s, x, (-2, 0)) == log(x**2 + 1)
assert IMT(pi/(s*sin(2*pi*s)), s, x, (-S(1)/2, 0)) == log(sqrt(x) + 1)
assert IMT(pi/(s*sin(pi*s)), s, x, (0, 1)) == log(1 + 1/x)
# TODO
def mysimp(expr):
from sympy import expand, logcombine, powsimp
return expand(
powsimp(logcombine(expr, force=True), force=True, deep=True),
force=True).replace(exp_polar, exp)
assert mysimp(mysimp(IMT(pi/(s*tan(pi*s)), s, x, (-1, 0)))) in [
log(1 - x)*Heaviside(1 - x) + log(x - 1)*Heaviside(x - 1),
log(x)*Heaviside(x - 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1)]
# test passing cot
assert mysimp(IMT(pi*cot(pi*s)/s, s, x, (0, 1))) in [
log(1/x - 1)*Heaviside(1 - x) + log(1 - 1/x)*Heaviside(x - 1),
-log(x)*Heaviside(-x + 1) + log(1 - 1/x)*Heaviside(x - 1) + log(-x +
1)*Heaviside(-x + 1), ]
# 8.4.14
assert IMT(-gamma(s + S(1)/2)/(sqrt(pi)*s), s, x, (-S(1)/2, 0)) == \
erf(sqrt(x))
# 8.4.19
assert simplify(IMT(gamma(a/2 + s)/gamma(a/2 - s + 1), s, x, (-re(a)/2, S(3)/4))) \
== besselj(a, 2*sqrt(x))
assert simplify(IMT(2**a*gamma(S(1)/2 - 2*s)*gamma(s + (a + 1)/2)
/ (gamma(1 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-(re(a) + 1)/2, S(1)/4))) == \
sin(sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(2**a*gamma(a/2 + s)*gamma(S(1)/2 - 2*s)
/ (gamma(S(1)/2 - s - a/2)*gamma(1 - 2*s + a)),
s, x, (-re(a)/2, S(1)/4))) == \
cos(sqrt(x))*besselj(a, sqrt(x))
# TODO this comes out as an amazing mess, but simplifies nicely
assert simplify(IMT(gamma(a + s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s)*gamma(1 + a - s)),
s, x, (-re(a), S(1)/2))) == \
besselj(a, sqrt(x))**2
assert simplify(IMT(gamma(s)*gamma(S(1)/2 - s)
/ (sqrt(pi)*gamma(1 - s - a)*gamma(1 + a - s)),
s, x, (0, S(1)/2))) == \
besselj(-a, sqrt(x))*besselj(a, sqrt(x))
assert simplify(IMT(4**s*gamma(-2*s + 1)*gamma(a/2 + b/2 + s)
/ (gamma(-a/2 + b/2 - s + 1)*gamma(a/2 - b/2 - s + 1)
*gamma(a/2 + b/2 - s + 1)),
s, x, (-(re(a) + re(b))/2, S(1)/2))) == \
besselj(a, sqrt(x))*besselj(b, sqrt(x))
# Section 8.4.20
# TODO this can be further simplified!
assert simplify(IMT(-2**(2*s)*cos(pi*a/2 - pi*b/2 + pi*s)*gamma(-2*s + 1) *
gamma(a/2 - b/2 + s)*gamma(a/2 + b/2 + s) /
(pi*gamma(a/2 - b/2 - s + 1)*gamma(a/2 + b/2 - s + 1)),
s, x,
(Max(-re(a)/2 - re(b)/2, -re(a)/2 + re(b)/2), S(1)/2))) == \
besselj(a, sqrt(x))*-(besselj(-b, sqrt(x)) -
besselj(b, sqrt(x))*cos(pi*b))/sin(pi*b)
# TODO more
# for coverage
assert IMT(pi/cos(pi*s), s, x, (0, S(1)/2)) == sqrt(x)/(x + 1)
def test_laplace_transform():
from sympy import (fresnels, fresnelc, hyper)
LT = laplace_transform
a, b, c, = symbols('a b c', positive=True)
t = symbols('t')
w = Symbol("w")
f = Function("f")
# Test unevaluated form
assert laplace_transform(f(t), t, w) == LaplaceTransform(f(t), t, w)
assert inverse_laplace_transform(
f(w), w, t, plane=0) == InverseLaplaceTransform(f(w), w, t, 0)
# test a bug
spos = symbols('s', positive=True)
assert LT(exp(t), t, spos)[:2] == (1/(spos - 1), True)
# basic tests from wikipedia
assert LT((t - a)**b*exp(-c*(t - a))*Heaviside(t - a), t, s) == \
((s + c)**(-b - 1)*exp(-a*s)*gamma(b + 1), -c, True)
assert LT(t**a, t, s) == (s**(-a - 1)*gamma(a + 1), 0, True)
assert LT(Heaviside(t), t, s) == (1/s, 0, True)
assert LT(Heaviside(t - a), t, s) == (exp(-a*s)/s, 0, True)
assert LT(1 - exp(-a*t), t, s) == (a/(s*(a + s)), 0, True)
assert LT((exp(2*t) - 1)*exp(-b - t)*Heaviside(t)/2, t, s, noconds=True) \
== exp(-b)/(s**2 - 1)
assert LT(exp(t), t, s)[:2] == (1/(s - 1), 1)
assert LT(exp(2*t), t, s)[:2] == (1/(s - 2), 2)
assert LT(exp(a*t), t, s)[:2] == (1/(s - a), a)
assert LT(log(t/a), t, s) == ((log(a*s) + EulerGamma)/s/-1, 0, True)
assert LT(erf(t), t, s) == ((-erf(s/2) + 1)*exp(s**2/4)/s, 0, True)
assert LT(sin(a*t), t, s) == (a/(a**2 + s**2), 0, True)
assert LT(cos(a*t), t, s) == (s/(a**2 + s**2), 0, True)
# TODO would be nice to have these come out better
assert LT(
exp(-a*t)*sin(b*t), t, s) == (b/(b**2 + (a + s)**2), -a, True)
assert LT(exp(-a*t)*cos(b*t), t, s) == \
((a + s)/(b**2 + (a + s)**2), -a, True)
# TODO sinh, cosh have delicate cancellation
assert LT(besselj(0, t), t, s) == (1/sqrt(1 + s**2), 0, True)
assert LT(besselj(1, t), t, s) == (1 - 1/sqrt(1 + 1/s**2), 0, True)
# TODO general order works, but is a *mess*
# TODO besseli also works, but is an even greater mess
# test a bug in conditions processing
# TODO the auxiliary condition should be recognised/simplified
assert LT(exp(t)*cos(t), t, s)[:-1] in [
((s - 1)/(s**2 - 2*s + 2), -oo),
((s - 1)/((s - 1)**2 + 1), -oo),
]
# Fresnel functions
assert laplace_transform(fresnels(t), t, s) == \
((-sin(s**2/(2*pi))*fresnels(s/pi) + sin(s**2/(2*pi))/2 -
cos(s**2/(2*pi))*fresnelc(s/pi) + cos(s**2/(2*pi))/2)/s, 0, True)
assert laplace_transform(fresnelc(t), t, s) == (
(sin(s**2/(2*pi))*fresnelc(s/pi)/s - cos(s**2/(2*pi))*fresnels(s/pi)/s
+ sqrt(2)*cos(s**2/(2*pi) + pi/4)/(2*s), 0, True))
assert LT(Matrix([[exp(t), t*exp(-t)], [t*exp(-t), exp(t)]]), t, s) ==\
Matrix([
[(1/(s - 1), 1, True), ((s + 1)**(-2), 0, True)],
[((s + 1)**(-2), 0, True), (1/(s - 1), 1, True)]
])
def test_inverse_laplace_transform():
from sympy import (expand, sinh, cosh, besselj, besseli, exp_polar,
unpolarify, simplify, factor_terms)
ILT = inverse_laplace_transform
a, b, c, = symbols('a b c', positive=True, finite=True)
t = symbols('t')
def simp_hyp(expr):
return factor_terms(expand_mul(expr)).rewrite(sin)
# just test inverses of all of the above
assert ILT(1/s, s, t) == Heaviside(t)
assert ILT(1/s**2, s, t) == t*Heaviside(t)
assert ILT(1/s**5, s, t) == t**4*Heaviside(t)/24
assert ILT(exp(-a*s)/s, s, t) == Heaviside(t - a)
assert ILT(exp(-a*s)/(s + b), s, t) == exp(b*(a - t))*Heaviside(-a + t)
assert ILT(a/(s**2 + a**2), s, t) == sin(a*t)*Heaviside(t)
assert ILT(s/(s**2 + a**2), s, t) == cos(a*t)*Heaviside(t)
# TODO is there a way around simp_hyp?
assert simp_hyp(ILT(a/(s**2 - a**2), s, t)) == sinh(a*t)*Heaviside(t)
assert simp_hyp(ILT(s/(s**2 - a**2), s, t)) == cosh(a*t)*Heaviside(t)
assert ILT(a/((s + b)**2 + a**2), s, t) == exp(-b*t)*sin(a*t)*Heaviside(t)
assert ILT(
(s + b)/((s + b)**2 + a**2), s, t) == exp(-b*t)*cos(a*t)*Heaviside(t)
# TODO sinh/cosh shifted come out a mess. also delayed trig is a mess
# TODO should this simplify further?
assert ILT(exp(-a*s)/s**b, s, t) == \
(t - a)**(b - 1)*Heaviside(t - a)/gamma(b)
assert ILT(exp(-a*s)/sqrt(1 + s**2), s, t) == \
Heaviside(t - a)*besselj(0, a - t) # note: besselj(0, x) is even
# XXX ILT turns these branch factor into trig functions ...
assert simplify(ILT(a**b*(s + sqrt(s**2 - a**2))**(-b)/sqrt(s**2 - a**2),
s, t).rewrite(exp)) == \
Heaviside(t)*besseli(b, a*t)
assert ILT(a**b*(s + sqrt(s**2 + a**2))**(-b)/sqrt(s**2 + a**2),
s, t).rewrite(exp) == \
Heaviside(t)*besselj(b, a*t)
assert ILT(1/(s*sqrt(s + 1)), s, t) == Heaviside(t)*erf(sqrt(t))
# TODO can we make erf(t) work?
assert ILT(1/(s**2*(s**2 + 1)),s,t) == (t - sin(t))*Heaviside(t)
assert ILT( (s * eye(2) - Matrix([[1, 0], [0, 2]])).inv(), s, t) ==\
Matrix([[exp(t)*Heaviside(t), 0], [0, exp(2*t)*Heaviside(t)]])
def test_fourier_transform():
from sympy import simplify, expand, expand_complex, factor, expand_trig
FT = fourier_transform
IFT = inverse_fourier_transform
def simp(x):
return simplify(expand_trig(expand_complex(expand(x))))
def sinc(x):
return sin(pi*x)/(pi*x)
k = symbols('k', real=True)
f = Function("f")
# TODO for this to work with real a, need to expand abs(a*x) to abs(a)*abs(x)
a = symbols('a', positive=True)
b = symbols('b', positive=True)
posk = symbols('posk', positive=True)
# Test unevaluated form
assert fourier_transform(f(x), x, k) == FourierTransform(f(x), x, k)
assert inverse_fourier_transform(
f(k), k, x) == InverseFourierTransform(f(k), k, x)
# basic examples from wikipedia
assert simp(FT(Heaviside(1 - abs(2*a*x)), x, k)) == sinc(k/a)/a
# TODO IFT is a *mess*
assert simp(FT(Heaviside(1 - abs(a*x))*(1 - abs(a*x)), x, k)) == sinc(k/a)**2/a
# TODO IFT
assert factor(FT(exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)
# NOTE: the ift comes out in pieces
assert IFT(1/(a + 2*pi*I*x), x, posk,
noconds=False) == (exp(-a*posk), True)
assert IFT(1/(a + 2*pi*I*x), x, -posk,
noconds=False) == (0, True)
assert IFT(1/(a + 2*pi*I*x), x, symbols('k', negative=True),
noconds=False) == (0, True)
# TODO IFT without factoring comes out as meijer g
assert factor(FT(x*exp(-a*x)*Heaviside(x), x, k), extension=I) == \
1/(a + 2*pi*I*k)**2
assert FT(exp(-a*x)*sin(b*x)*Heaviside(x), x, k) == \
b/(b**2 + (a + 2*I*pi*k)**2)
assert FT(exp(-a*x**2), x, k) == sqrt(pi)*exp(-pi**2*k**2/a)/sqrt(a)
assert IFT(sqrt(pi/a)*exp(-(pi*k)**2/a), k, x) == exp(-a*x**2)
assert FT(exp(-a*abs(x)), x, k) == 2*a/(a**2 + 4*pi**2*k**2)
# TODO IFT (comes out as meijer G)
# TODO besselj(n, x), n an integer > 0 actually can be done...
# TODO are there other common transforms (no distributions!)?
def test_sine_transform():
from sympy import sinh, cosh, EulerGamma
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert sine_transform(f(t), t, w) == SineTransform(f(t), t, w)
assert inverse_sine_transform(
f(w), w, t) == InverseSineTransform(f(w), w, t)
assert sine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_sine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert sine_transform(
(1/sqrt(t))**3, t, w) == sqrt(w)*gamma(S(1)/4)/(2*gamma(S(5)/4))
assert sine_transform(t**(-a), t, w) == 2**(
-a + S(1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma((a + 1)/2)
assert inverse_sine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + S(1)/2), w, t) == t**(-a)
assert sine_transform(
exp(-a*t), t, w) == sqrt(2)*w/(sqrt(pi)*(a**2 + w**2))
assert inverse_sine_transform(
sqrt(2)*w/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert sine_transform(
log(t)/t, t, w) == -sqrt(2)*sqrt(pi)*(log(w**2) + 2*EulerGamma)/4
assert sine_transform(
t*exp(-a*t**2), t, w) == sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2))
assert inverse_sine_transform(
sqrt(2)*w*exp(-w**2/(4*a))/(4*a**(S(3)/2)), w, t) == t*exp(-a*t**2)
def test_cosine_transform():
from sympy import sinh, cosh, Si, Ci
t = symbols("t")
w = symbols("w")
a = symbols("a")
f = Function("f")
# Test unevaluated form
assert cosine_transform(f(t), t, w) == CosineTransform(f(t), t, w)
assert inverse_cosine_transform(
f(w), w, t) == InverseCosineTransform(f(w), w, t)
assert cosine_transform(1/sqrt(t), t, w) == 1/sqrt(w)
assert inverse_cosine_transform(1/sqrt(w), w, t) == 1/sqrt(t)
assert cosine_transform(1/(
a**2 + t**2), t, w) == sqrt(2)*sqrt(pi)*exp(-a*w)/(2*a)
assert cosine_transform(t**(
-a), t, w) == 2**(-a + S(1)/2)*w**(a - 1)*gamma((-a + 1)/2)/gamma(a/2)
assert inverse_cosine_transform(2**(-a + S(
1)/2)*w**(a - 1)*gamma(-a/2 + S(1)/2)/gamma(a/2), w, t) == t**(-a)
assert cosine_transform(
exp(-a*t), t, w) == sqrt(2)*a/(sqrt(pi)*(a**2 + w**2))
assert inverse_cosine_transform(
sqrt(2)*a/(sqrt(pi)*(a**2 + w**2)), w, t) == exp(-a*t)
assert cosine_transform(exp(-a*sqrt(t))*cos(a*sqrt(
t)), t, w) == a*exp(-a**2/(2*w))/(2*w**(S(3)/2))
assert cosine_transform(1/(a + t), t, w) == sqrt(2)*(
(-2*Si(a*w) + pi)*sin(a*w)/2 - cos(a*w)*Ci(a*w))/sqrt(pi)
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2, 0), ()), (
(S(1)/2, 0, 0), (S(1)/2,)), a**2*w**2/4)/(2*pi), w, t) == 1/(a + t)
assert cosine_transform(1/sqrt(a**2 + t**2), t, w) == sqrt(2)*meijerg(
((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi))
assert inverse_cosine_transform(sqrt(2)*meijerg(((S(1)/2,), ()), ((0, 0), (S(1)/2,)), a**2*w**2/4)/(2*sqrt(pi)), w, t) == 1/(t*sqrt(a**2/t**2 + 1))
def test_hankel_transform():
from sympy import sinh, cosh, gamma, sqrt, exp
r = Symbol("r")
k = Symbol("k")
nu = Symbol("nu")
m = Symbol("m")
a = symbols("a")
assert hankel_transform(1/r, r, k, 0) == 1/k
assert inverse_hankel_transform(1/k, k, r, 0) == 1/r
assert hankel_transform(
1/r**m, r, k, 0) == 2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2)
assert inverse_hankel_transform(
2**(-m + 1)*k**(m - 2)*gamma(-m/2 + 1)/gamma(m/2), k, r, 0) == r**(-m)
assert hankel_transform(1/r**m, r, k, nu) == (
2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2))
assert inverse_hankel_transform(2**(-m + 1)*k**(
m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2), k, r, nu) == r**(-m)
assert hankel_transform(r**nu*exp(-a*r), r, k, nu) == \
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(
3)/2)*gamma(nu + S(3)/2)/sqrt(pi)
assert inverse_hankel_transform(
2**(nu + 1)*a*k**(-nu - 3)*(a**2/k**2 + 1)**(-nu - S(3)/2)*gamma(
nu + S(3)/2)/sqrt(pi), k, r, nu) == r**nu*exp(-a*r)
def test_issue_7181():
    assert mellin_transform(1/(1 - x), x, s) is not None
| wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/integrals/tests/test_transforms.py | Python | mit | 31,213 | 0.002531 |
######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# MAKE GAIN TABLE FOR FLUX DENSITY BOOTSTRAPPING
# Make a gain table that includes gain and opacity corrections for final
# amp cal, for flux density bootstrapping
logprint ("Starting EVLA_pipe_fluxgains.py", logfileout='logs/fluxgains.log')
time_list=runtiming('fluxgains', 'start')
QA2_fluxgains='Pass'
#logprint ("Making fresh calibrators.ms", logfileout='logs/fluxgains.log')
#
#syscommand='rm -rf calibrators.ms'
#os.system(syscommand)
#
#default('split')
#vis=ms_active
#outputvis='calibrators.ms'
#datacolumn='corrected'
#field=''
#spw=''
#width=int(max(channels))
#antenna=''
#timebin='0s'
#timerange=''
#scan=calibrator_scan_select_string
#intent=''
#array=''
#uvrange=''
#correlation=''
#observation=''
#keepflags=False
#split()
logprint ("Setting models for standard primary calibrators", logfileout='logs/fluxgains.log')
tb.open('calibrators.ms')
positions = []
for ii in range(0,len(field_positions[0][0])):
positions.append([field_positions[0][0][ii], field_positions[1][0][ii]])
standard_source_names = [ '3C48', '3C138', '3C147', '3C286' ]
standard_source_fields = find_standards(positions)
ii=0
for fields in standard_source_fields:
for myfield in fields:
spws = field_spws[myfield]
for myspw in spws:
reference_frequency = center_frequencies[myspw]
EVLA_band = find_EVLA_band(reference_frequency)
logprint ("Center freq for spw "+str(myspw)+" = "+str(reference_frequency)+", observing band = "+EVLA_band, logfileout='logs/fluxgains.log')
model_image = standard_source_names[ii]+'_'+EVLA_band+'.im'
logprint ("Setting model for field "+str(myfield)+" spw "+str(myspw)+" using "+model_image, logfileout='logs/fluxgains.log')
try:
default('setjy')
vis='calibrators.ms'
field=str(myfield)
spw=str(myspw)
selectdata=False
scalebychan=True
standard='Perley-Butler 2013'
model=model_image
listmodels=False
usescratch=scratch
setjy()
except:
logprint('no data found for field ' + str(myfield)+" spw "+str(myspw), logfileout='logs/fluxgains.log')
ii=ii+1
tb.close()
logprint ("Making gain tables for flux density bootstrapping", logfileout='logs/fluxgains.log')
logprint ("Short solint = "+new_gain_solint1, logfileout='logs/fluxgains.log')
logprint ("Long solint = "+gain_solint2, logfileout='logs/fluxgains.log')
print ""
print "Finding a reference antenna"
print ""
refantspw=''
refantfield=calibrator_field_select_string
findrefant=RefAntHeuristics(vis='calibrators.ms',field=refantfield,geometry=True,flagging=True)
RefAntOutput=findrefant.calculate()
refAnt=str(RefAntOutput[0])+','+str(RefAntOutput[1])+','+str(RefAntOutput[2])+','+str(RefAntOutput[3])
logprint ("The pipeline will use antenna(s) "+refAnt+" as the reference", logfileout='logs/fluxgains.log')
# Derive amp gain table. Note that gaincurves and opacity
# corrections have already been applied during applycal and split in
# semiFinalBPdcals/solint.py.
# Need to add check for 3C84 in here, when heuristics have been sorted out
default('gaincal')
vis='calibrators.ms'
caltable='fluxphaseshortgaincal.g'
field=''
spw=''
intent=''
selectdata=False
solint=new_gain_solint1
combine='scan'
preavg=-1.0
refant=refAnt
minblperant=minBL_for_cal
minsnr=3.0
solnorm=False
gaintype='G'
smodel=[]
calmode='p'
append=False
docallib=False
#gaintable=filter(None, [priorcals,'delay.k','BPcal.b'])
gaintable=['']
gainfield=['']
interp=['']
spwmap=[]
parang=False
gaincal()
default('gaincal')
vis='calibrators.ms'
caltable='fluxgaincal.g'
field=''
spw=''
intent=''
selectdata=False
solint=gain_solint2
combine='scan'
preavg=-1.0
refant=refAnt
minblperant=minBL_for_cal
minsnr=5.0
solnorm=False
gaintype='G'
smodel=[]
calmode='ap'
append=False
docallib=False
#gaintable=filter(None, [priorcals,'delay.k','BPcal.b','fluxphaseshortgaincal.g'])
gaintable=['fluxphaseshortgaincal.g']
gainfield=['']
interp=['']
spwmap=[]
parang=False
gaincal()
logprint ("Gain table fluxgaincal.g is ready for flagging", logfileout='logs/fluxgains.log')
# Calculate fractions of flagged solutions for final QA2; note, can
# tolerate higher fraction of flagged solutions for this step than in
# other gain tables
flaggedGainSolns=getCalFlaggedSoln('fluxgaincal.g')
if (flaggedGainSolns['all']['total'] == 0):
QA2_fluxgains='Fail'
elif (flaggedGainSolns['antmedian']['fraction'] > 0.2):
QA2_fluxgains='Partial'
logprint ("QA2 score: "+QA2_fluxgains, logfileout='logs/fluxgains.log')
logprint ("Finished EVLA_pipe_fluxgains.py", logfileout='logs/fluxgains.log')
time_list=runtiming('fluxgains', 'end')
pipeline_save()
| e-koch/VLA_Lband | 16B/pipeline4.7.1_custom/EVLA_pipe_fluxgains.py | Python | mit | 6,065 | 0.019621 |
"""empty message
Revision ID: 059e2a9bfb4c
Revises:
Create Date: 2017-07-10 21:50:44.380938
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '059e2a9bfb4c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('user_uuid', postgresql.UUID(as_uuid=True), server_default=sa.text(u'uuid_generate_v4()'), nullable=False),
sa.Column('permissions_group', sa.String(), nullable=True),
sa.Column('ddw_access_token', sa.String(), nullable=True),
sa.Column('ddw_token_expires_in', sa.Integer(), nullable=True),
sa.Column('ddw_avatar_url', sa.String(), nullable=True),
sa.Column('ddw_display_name', sa.String(), nullable=True),
sa.Column('ddw_user_id', sa.String(), nullable=True),
sa.Column('ddw_user_created', sa.Date(), nullable=True),
sa.Column('ddw_user_updated', sa.Date(), nullable=True),
sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.PrimaryKeyConstraint('user_uuid')
)
op.create_table('sessions',
sa.Column('session_uuid', postgresql.UUID(as_uuid=True), server_default=sa.text(u'uuid_generate_v4()'), nullable=False),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('user_uuid', postgresql.UUID(as_uuid=True), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['user_uuid'], ['users.user_uuid'], ),
sa.PrimaryKeyConstraint('session_uuid')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('sessions')
op.drop_table('users')
# ### end Alembic commands ###
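# Illustrative note (not part of the generated migration): with Alembic
# configured, this revision would typically be applied with `alembic upgrade
# head` and reverted with `alembic downgrade -1`.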
| hardworkingcoder/dw_experiments | migrations/versions/059e2a9bfb4c_.py | Python | mit | 1,851 | 0.010805 |
#!/usr/bin/python2.7
#
# setup_leveldb.py
#
# Compiles and install Minecraft Pocket Edtition binary support.
#
__author__ = "D.C.-G. 2017"
__version__ = "0.3.0"
import sys
import os
import platform
import fnmatch
if sys.platform != "linux2":
print "This script can't run on other platforms than Linux ones..."
sys.exit(1)
bin_deps = ('gcc', 'g++', 'unzip', 'wget|curl')
wget_curl = None
wget_cmd = "wget -q --no-check-certificate -O"
curl_cmd = "curl -LskS -o"
mojang_sources_url = "https://codeload.github.com/Mojang/leveldb-mcpe/zip/"
mojang_commit = "a056ea7c18dfd7b806d6d693726ce79d75543904"
jocopa3_sources_url = "https://codeload.github.com/jocopa3/leveldb-mcpe/zip/"
jocopa3_commit = "56bdd1f38dde7074426d85eab01a5c1c0b5b1cfe"
zlib_sources_url = "https://codeload.github.com/madler/zlib/zip/"
zlib_commit = "4a090adef8c773087ec8916ad3c2236ef560df27"
zlib_ideal_version = "1.2.10"
zlib_minimal_version = "1.2.8"
def check_bins(bins):
print 'Searching for the needed binaries %s...' % repr(bins).replace("'", '')
missing_bin = False
for name in bins:
names = []
if '|' in name:
names = name.split('|')
if names:
found = False
for n in names:
if not os.system('which %s > /dev/null' % n):
found = True
break
else:
print "Could not find %s." % n
if found:
g_keys = globals().keys()
g_name = name.replace('|', '_')
print "g_name", g_name, g_name in g_keys
if g_name in g_keys:
globals()[g_name] = globals()['%s_cmd' % n]
else:
print '*** WARNING: None of these binaries were found on your system: %s.'%', '.join(names)
else:
if os.system('which %s > /dev/null' % name):
print '*** WARNING: %s not found.' % name
missing_bin = True
if missing_bin:
a = raw_input('The binary dependencies are not satisfied. The build may fail.\nContinue [y/N]?')
if a and a in 'yY':
pass
else:
sys.exit()
else:
print 'All the needed binaries were found.'
# Picked from another project to find the lib and adapted to the need
import re
ARCH = {'32bit': '32', '64bit': '64'}[platform.architecture()[0]]
default_paths = ['/lib', '/lib32', '/lib64', '/usr/lib', '/usr/lib32','/usr/lib64',
'/usr/local/lib', os.path.expanduser('~/.local/lib'), '.']
# Gather the libraries paths.
def get_lib_paths(file_name):
paths = []
if os.path.isfile(file_name):
lines = [a.strip() for a in open(file_name).readlines()]
for i, line in enumerate(lines):
if not line.startswith('#') and line.strip():
if line.startswith('include'):
line = line.split(' ', 1)[1]
if '*' in line:
pat = r"%s" % line.split(os.path.sep)[-1].replace('.', '\.').replace('*', '.*')
d = os.path.split(line)[0]
if os.path.isdir(d):
for n in os.listdir(d):
r = re.findall(pat, n)
if r:
paths += [a for a in get_lib_paths(os.path.join(d, n)) if a not in paths]
else:
paths += [a for a in get_lib_paths(line) if not a in paths]
elif not line in paths and os.path.isdir(line):
paths.append(line)
return paths
def find_lib(lib_name, input_file='/etc/ld.so.conf'):
paths = default_paths + get_lib_paths(input_file)
arch_paths = []
other_paths = []
while paths:
path = paths.pop(0)
if ARCH in path:
arch_paths.insert(0, path)
elif path.endswith('/lib'):
arch_paths.append(path)
else:
other_paths.append(path)
paths = arch_paths + other_paths
found = None
r = None
ver = None
name = lib_name
hash_list = name.split('.')
hash_list.reverse()
idx = hash_list.index('so')
i = 0
while i <= idx and not found:
for path in paths:
print "Scanning %s for %s" % (path, name)
if os.path.exists(path):
for path, dirnames, filenames in os.walk(path):
if name in filenames:
found = os.path.join(path, name)
break
if found:
break
i += 1
name = name.rsplit('.', 1)[0]
cur_dir = os.getcwd()
os.chdir(path)
if found:
base_path = os.path.split(found)[0]
while os.path.islink(found):
found = os.readlink(found)
if not found.startswith("/"):
found = os.path.abspath(os.path.join(base_path, found))
# Verify the architecture of the library
inp, outp = os.popen2('file %s | grep "ELF %s"' % (found, ARCH))
r = bool(outp.read())
inp.close()
outp.close()
# If the architecture could not be check with library internal data, rely on the folder name.
if os.path.split(found)[0] in arch_paths:
r = True
v = found.rsplit('.so.', 1)
if len(v) == 2:
ver = v[1]
os.chdir(cur_dir)
return found, r, ver
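def _find_lib_example():
    # Illustrative sketch only (not part of the original script): the library
    # name below is an assumption for demonstration. find_lib() returns a
    # (resolved path or None, architecture-matches flag, detected version) tuple.
    path, arch_ok, version = find_lib("libz.so.1.2.8")
    print "libz:", path, arch_ok, version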
def get_sources(name, url):
print "Downloading sources for %s" % name
print "URL: %s" % url
os.system("%s %s.zip %s" % (wget_curl, name, url))
print "Unpacking %s" % name
os.system("unzip -q %s.zip" % name)
os.system("mv $(ls -d1 */ | egrep '{n}-') {n}".format(n=name))
print "Cleaning archive."
os.remove("%s.zip" % name)
def build_zlib():
print "Building zlib..."
return os.WEXITSTATUS(os.system("./configure; make"))
def build_leveldb(zlib):
print "Building leveldb..."
# Looks like the '-lz' option has to be changed...
if zlib:
data = open('Makefile').read()
data = data.replace("LIBS += $(PLATFORM_LIBS) -lz", "LIBS += $(PLATFORM_LIBS) %s" % zlib)
open("Makefile", "w").write(data)
cpath = os.environ.get("CPATH")
if cpath:
os.environ["CPATH"] = "./zlib:$CPATH"
else:
os.environ["CPATH"] = "./zlib"
return os.WEXITSTATUS(os.system("make"))
def main():
print "=" * 72
print "Building Linux Minecraft Pocket Edition for MCEdit..."
print "-----------------------------------------------------"
global leveldb_commit
global zlib_commit
global zlib_sources_url
force_zlib = False
leveldb_source_url = mojang_sources_url
leveldb_commit = mojang_commit
cur_dir = os.getcwd()
if "--force-zlib" in sys.argv:
force_zlib = True
sys.argv.remove("--force-zlib")
if "--alt-leveldb" in sys.argv:
leveldb_source_url = jocopa3_sources_url
leveldb_commit = jocopa3_commit
for arg, var in (("--leveldb-commit", 'leveldb_commit'), ("--zlib-commit", 'zlib_commit')):
if arg in sys.argv:
globals()[var] = sys.argv[sys.argv.index(arg) + 1]
leveldb_source_url += leveldb_commit
zlib_sources_url += zlib_commit
check_bins(bin_deps)
# Get the sources here.
get_sources("leveldb", leveldb_source_url)
os.chdir("leveldb")
# os.rmdir("zlib")
get_sources("zlib", zlib_sources_url)
os.chdir(cur_dir)
zlib = (None, None, None)
# Check zlib
if not force_zlib:
print "Checking zlib."
zlib = find_lib("libz.so.%s" % zlib_ideal_version)
print zlib
if zlib == (None, None, None):
zlib = None
print "*** WARNING: zlib not found!"
print " It is recommended you install zlib v%s on your system or" % zlib_ideal_version
print " let this script install it only for leveldb."
print " Enter 'b' to build zlib v1.2.10 only for leveldb."
print " Enter 'a' to quit now and install zlib on your yourself."
print " It is recomended to use your package manager to install zlib."
a = ""
while a.lower() not in "abc":
a = raw_input("Build zlib [b] or abort [a]? ")
if a == "b":
force_zlib = True
elif a == "a":
sys.exit(1)
else:
err = False
if zlib[2] == None:
print "*** WARNING: zlib has been found, but the exact version could not be"
print " determined."
print " The sources for zlib v%s will be downloaded and the" % zlib_ideal_version
print " build will start."
print " If the build fails or the support does not work, install"
print " the version %s and retry. You may also install another" % zlib_ideal_version
print " version and retry with this one."
err = True
elif zlib[2] not in ("1.2.8", "1.2.10"):
print "*** WARNING: zlib was found, but its version is %s." % zlib[2]
print " You can try to build with this version, but it may fail,"
print " or the generated libraries may not work..."
err = True
if zlib[1] == False:
print "*** WARNING: zlib has been found on your system, but not for the"
print " current architecture."
print " You apparently run on a %s, and the found zlib is %s" % (ARCH, zlib[0])
print " Building the Pocket Edition support may fail. If not,"
print " the support may not work."
print " You can continue, but it is recommended to install zlib"
print " for your architecture."
err = True
if err:
a = raw_input("Continue [y/N]? ")
if a and a in "yY":
zlib = zlib[0]
else:
sys.exit(1)
else:
print "Found compliant zlib v%s." % zlib[2]
zlib = zlib[0]
if force_zlib:
os.chdir("leveldb/zlib")
r = build_zlib()
if r:
print "Zlib build failed."
return r
os.chdir(cur_dir)
os.rename("leveldb/zlib/libz.so.1.2.10", "./libz.so.1.2.10")
os.rename("leveldb/zlib/libz.so.1", "./libz.so.1")
os.rename("leveldb/zlib/libz.so", "./libz.so")
# Tweak the leveldb makefile to force the linker to use the built zlib
data = open("leveldb/Makefile").read()
data = data.replace("PLATFORM_SHARED_LDFLAGS", "PSL")
data = data.replace("LDFLAGS += $(PLATFORM_LDFLAGS)",
"LDFLAGS += $(PLATFORM_LDFLAGS)\nPSL = -L{d} -lz -Wl,-R{d} $(PLATFORM_SHARED_LDFLAGS)".format(d=cur_dir))
data = data.replace("LIBS += $(PLATFORM_LIBS) -lz", "LIBS += -L{d} -lz -Wl,-R{d} $(PLATFORM_LIBS)".format(d=cur_dir))
open("leveldb/Makefile", "w").write(data)
zlib = None
os.chdir("leveldb")
r = build_leveldb(zlib)
if r:
print "PE support build failed."
return r
os.chdir(cur_dir)
for root, d_names, f_names in os.walk("leveldb"):
for f_name in fnmatch.filter(f_names, "libleveldb.so*"):
os.rename(os.path.join(root, f_name), os.path.join(".", f_name))
print "Setup script ended."
if __name__ == "__main__":
sys.exit(main())
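# Example invocations (a sketch based on the flags parsed in main() above; the
# commit hash shown is only a placeholder, not a real value):
#
#   python2 setup_leveldb.py --force-zlib
#   python2 setup_leveldb.py --alt-leveldb --leveldb-commit <commit-sha>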
| fhfuih/MCEdit-Unified | pymclevel/setup_leveldb.py | Python | isc | 11,800 | 0.003136 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import GrafeasTransport
from .grpc import GrafeasGrpcTransport
from .grpc_asyncio import GrafeasGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[GrafeasTransport]]
_transport_registry["grpc"] = GrafeasGrpcTransport
_transport_registry["grpc_asyncio"] = GrafeasGrpcAsyncIOTransport
__all__ = (
"GrafeasTransport",
"GrafeasGrpcTransport",
"GrafeasGrpcAsyncIOTransport",
)
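# Usage sketch (illustrative only): client code typically looks a transport up
# by name in the registry defined above, e.g.
#
#   transport_cls = _transport_registry["grpc"]          # -> GrafeasGrpcTransport
#   transport_cls = _transport_registry["grpc_asyncio"]  # -> GrafeasGrpcAsyncIOTransport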
| googleapis/python-grafeas | grafeas/grafeas_v1/services/grafeas/transports/__init__.py | Python | apache-2.0 | 1,131 | 0 |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.db.models import Q
# from djangobmf.permissions import ModulePermission
from djangobmf.utils import FilterQueryset
class GoalFilter(FilterQueryset):
def filter_queryset(self, qs, user):
if user.has_perm('%s.can_manage' % qs.model._meta.app_label, qs.model):
return qs
qs_filter = Q(referee=user.djangobmf.employee or -1)
qs_filter |= Q(employees=user.djangobmf.employee or -1)
qs_filter |= Q(team__in=user.djangobmf.team)
if hasattr(qs.model, "project"): # pragma: no branch
project = qs.model._meta.get_field_by_name("project")[0].model
if user.has_perm('%s.can_manage' % project._meta.app_label, project):
qs_filter |= Q(project__isnull=False)
else:
qs_filter |= Q(project__isnull=False, project__employees=user.djangobmf.employee or -1)
qs_filter |= Q(project__isnull=False, project__team__in=user.djangobmf.team)
return qs.filter(qs_filter)
class TaskFilter(FilterQueryset):
def filter_queryset(self, qs, user):
qs_filter = Q(project__isnull=True, goal__isnull=True)
qs_filter |= Q(employee=user.djangobmf.employee or -1)
qs_filter |= Q(in_charge=user.djangobmf.employee)
if hasattr(qs.model, "goal"): # pragma: no branch
goal = qs.model._meta.get_field_by_name("goal")[0].model
if user.has_perm('%s.can_manage' % goal._meta.app_label, goal):
qs_filter |= Q(goal__isnull=False)
else:
qs_filter |= Q(goal__isnull=False, goal__referee=user.djangobmf.employee or -1)
qs_filter |= Q(goal__isnull=False, goal__employees=user.djangobmf.employee or -1)
qs_filter |= Q(goal__isnull=False, goal__team__in=user.djangobmf.team)
if hasattr(qs.model, "project"): # pragma: no branch
project = qs.model._meta.get_field_by_name("project")[0].model
if user.has_perm('%s.can_manage' % project._meta.app_label, project):
qs_filter |= Q(project__isnull=False)
else:
qs_filter |= Q(project__isnull=False, project__employees=user.djangobmf.employee or -1)
qs_filter |= Q(project__isnull=False, project__team__in=user.djangobmf.team)
return qs.filter(qs_filter)
| django-bmf/django-bmf | djangobmf/contrib/task/permissions.py | Python | bsd-3-clause | 2,444 | 0.003682 |
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Commonly-required utility methods needed by -- and potentially
customized by -- application and toolkit scripts. They have
been pulled out from the scripts because certain scripts had
gotten way too large as a result of including these methods."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.debug as debug
import orca.orca_state as orca_state
import orca.script_utilities as script_utilities
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(script_utilities.Utilities):
def __init__(self, script):
"""Creates an instance of the Utilities class.
Arguments:
- script: the script with which this instance is associated.
"""
script_utilities.Utilities.__init__(self, script)
#########################################################################
# #
# Utilities for finding, identifying, and comparing accessibles #
# #
#########################################################################
def displayedText(self, obj):
"""Returns the text being displayed for an object. Overridden here
        because OpenOffice uses symbols (e.g. ">>") for buttons but exposes
more useful information via the accessible's name.
Arguments:
- obj: the object
Returns the text being displayed for an object or None if there isn't
any text being shown.
"""
try:
role = obj.getRole()
except:
return ""
if role == pyatspi.ROLE_PUSH_BUTTON and obj.name:
return obj.name
if role == pyatspi.ROLE_TABLE_CELL:
strings = list(map(self.displayedText, [child for child in obj]))
text = "\n".join(strings)
if text.strip():
return text
try:
text = super().displayedText(obj)
except:
return ""
# TODO - JD: This is needed because the default behavior is to fall
# back on the name, which is bogus. Once that has been fixed, this
# hack can go.
if role == pyatspi.ROLE_TABLE_CELL and text == obj.name \
and (self.isSpreadSheetCell(obj) or self.isTextDocumentCell(obj)):
return ""
return text
def isTextArea(self, obj):
return obj and obj.getRole() == pyatspi.ROLE_TEXT
def isCellBeingEdited(self, obj):
if not obj:
return False
parent = obj.parent
if parent and parent.getRoleName() == 'text frame':
if self.spreadSheetCellName(parent):
return True
return False
def spreadSheetCellName(self, cell):
nameList = cell.name.split()
for name in nameList:
name = name.replace('.', '')
if not name.isalpha() and name.isalnum():
return name
return ''
def getRowColumnAndTable(self, cell):
"""Returns the (row, column, table) tuple for cell."""
if not (cell and cell.getRole() == pyatspi.ROLE_TABLE_CELL):
return -1, -1, None
cellParent = cell.parent
if cellParent and cellParent.getRole() == pyatspi.ROLE_TABLE_CELL:
cell = cellParent
cellParent = cell.parent
table = cellParent
if table and table.getRole() != pyatspi.ROLE_TABLE:
table = table.parent
try:
iTable = table.queryTable()
except:
return -1, -1, None
index = self.cellIndex(cell)
row = iTable.getRowAtIndex(index)
column = iTable.getColumnAtIndex(index)
return row, column, table
def getShowingCellsInRow(self, obj):
row, column, parentTable = self.getRowColumnAndTable(obj)
try:
table = parentTable.queryTable()
except:
return []
startIndex, endIndex = self.getTableRowRange(obj)
cells = []
for i in range(startIndex, endIndex):
cell = table.getAccessibleAt(row, i)
try:
showing = cell.getState().contains(pyatspi.STATE_SHOWING)
except:
continue
if showing:
cells.append(cell)
return cells
def getTableRowRange(self, obj):
"""If this is spread sheet cell, return the start and end indices
of the spread sheet cells for the table that obj is in. Otherwise
return the complete range (0, parentTable.nColumns).
Arguments:
- obj: a table cell.
Returns the start and end table cell indices.
"""
parent = obj.parent
try:
parentTable = parent.queryTable()
except:
return [-1, -1]
startIndex = 0
endIndex = parentTable.nColumns
if self.isSpreadSheetCell(obj):
extents = parent.queryComponent().getExtents(pyatspi.DESKTOP_COORDS)
y = extents.y
leftX = extents.x + 1
leftCell = \
parent.queryComponent().getAccessibleAtPoint(leftX, y, 0)
if leftCell:
table = leftCell.parent.queryTable()
index = self.cellIndex(leftCell)
startIndex = table.getColumnAtIndex(index)
rightX = extents.x + extents.width - 1
rightCell = \
parent.queryComponent().getAccessibleAtPoint(rightX, y, 0)
if rightCell:
table = rightCell.parent.queryTable()
index = self.cellIndex(rightCell)
endIndex = table.getColumnAtIndex(index) + 1
return [startIndex, endIndex]
def rowHeadersForCell(self, obj):
rowHeader, colHeader = self.getDynamicHeadersForCell(obj)
if rowHeader:
return [rowHeader]
return super().rowHeadersForCell(obj)
def columnHeadersForCell(self, obj):
rowHeader, colHeader = self.getDynamicHeadersForCell(obj)
if colHeader:
return [colHeader]
return super().columnHeadersForCell(obj)
def getDynamicHeadersForCell(self, obj, onlyIfNew=False):
if not (self._script.dynamicRowHeaders or self._script.dynamicColumnHeaders):
return None, None
objRow, objCol, table = self.getRowColumnAndTable(obj)
if not table:
return None, None
headersRow = self._script.dynamicColumnHeaders.get(hash(table))
headersCol = self._script.dynamicRowHeaders.get(hash(table))
if headersRow == objRow or headersCol == objCol:
return None, None
getRowHeader = headersCol != None
getColHeader = headersRow != None
if onlyIfNew:
getRowHeader = \
getRowHeader and objRow != self._script.pointOfReference.get("lastRow")
getColHeader = \
                getColHeader and objCol != self._script.pointOfReference.get("lastColumn")
parentTable = table.queryTable()
rowHeader, colHeader = None, None
if getColHeader:
colHeader = parentTable.getAccessibleAt(headersRow, objCol)
if getRowHeader:
rowHeader = parentTable.getAccessibleAt(objRow, headersCol)
return rowHeader, colHeader
def isSameObject(self, obj1, obj2, comparePaths=False, ignoreNames=False):
same = super().isSameObject(obj1, obj2, comparePaths, ignoreNames)
if not same or obj1 == obj2:
return same
# The document frame currently contains just the active page,
# resulting in false positives. So for paragraphs, rely upon
# the equality check.
if obj1.getRole() == obj2.getRole() == pyatspi.ROLE_PARAGRAPH:
return False
# Handle the case of false positives in dialog boxes resulting
# from getIndexInParent() returning a bogus value. bgo#618790.
#
if not obj1.name \
and obj1.getRole() == pyatspi.ROLE_TABLE_CELL \
and obj1.getIndexInParent() == obj2.getIndexInParent() == -1:
top = self.topLevelObject(obj1)
if top and top.getRole() == pyatspi.ROLE_DIALOG:
same = False
return same
def isLayoutOnly(self, obj):
"""Returns True if the given object is a container which has
no presentable information (label, name, displayed text, etc.)."""
try:
role = obj.getRole()
childCount = obj.childCount
except:
role = None
childCount = 0
if role == pyatspi.ROLE_PANEL and childCount == 1:
if obj.name and obj.name == obj[0].name:
return True
if role == pyatspi.ROLE_LIST \
and obj.parent.getRole() == pyatspi.ROLE_COMBO_BOX:
return True
return script_utilities.Utilities.isLayoutOnly(self, obj)
def locateInputLine(self, obj):
"""Return the spread sheet input line. This only needs to be found
the very first time a spread sheet table cell gets focus. We use the
table cell to work back up the component hierarchy until we have found
the common panel that both it and the input line reside in. We then
use that as the base component to search for a component which has a
paragraph role. This will be the input line.
Arguments:
- obj: the spread sheet table cell that has just got focus.
Returns the spread sheet input line component.
"""
if self._script.inputLineForCell:
return self._script.inputLineForCell
isScrollPane = lambda x: x and x.getRole() == pyatspi.ROLE_SCROLL_PANE
scrollPane = pyatspi.findAncestor(obj, isScrollPane)
if not scrollPane:
return None
toolbar = None
for child in scrollPane.parent:
if child and child.getRole() == pyatspi.ROLE_TOOL_BAR:
toolbar = child
break
if not toolbar:
msg = "ERROR: Calc inputline toolbar not found."
debug.println(debug.LEVEL_INFO, msg, True)
return
isParagraph = lambda x: x and x.getRole() == pyatspi.ROLE_PARAGRAPH
allParagraphs = pyatspi.findAllDescendants(toolbar, isParagraph)
if len(allParagraphs) == 1:
self._script.inputLineForCell = allParagraphs[0]
return self._script.inputLineForCell
def frameAndDialog(self, obj):
"""Returns the frame and (possibly) the dialog containing
the object. Overridden here for presentation of the title
bar information: If the locusOfFocus is a spreadsheet cell,
1) we are not in a dialog and 2) we need to present both the
frame name and the sheet name. So we might as well return the
sheet in place of the dialog so that the default code can do
its thing.
"""
if not self.isSpreadSheetCell(obj):
return script_utilities.Utilities.frameAndDialog(self, obj)
results = [None, None]
parent = obj.parent
while parent and (parent.parent != parent):
if parent.getRole() == pyatspi.ROLE_FRAME:
results[0] = parent
if parent.getRole() == pyatspi.ROLE_TABLE:
results[1] = parent
parent = parent.parent
return results
def isFunctionalDialog(self, obj):
"""Returns true if the window is functioning as a dialog."""
# The OOo Navigator window looks like a dialog, acts like a
# dialog, and loses focus requiring the user to know that it's
# there and needs Alt+F6ing into. But officially it's a normal
# window.
# There doesn't seem to be (an efficient) top-down equivalent
# of utilities.hasMatchingHierarchy(). But OOo documents have
# root panes; this thing does not.
#
rolesList = [pyatspi.ROLE_FRAME,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_TOOL_BAR,
pyatspi.ROLE_PUSH_BUTTON]
if obj.getRole() != rolesList[0]:
# We might be looking at the child.
#
rolesList.pop(0)
while obj and obj.childCount and len(rolesList):
if obj.getRole() != rolesList.pop(0):
return False
obj = obj[0]
return True
def validParent(self, obj):
"""Returns the first valid parent/ancestor of obj. We need to do
this in some applications and toolkits due to bogus hierarchies.
See bugs:
http://www.openoffice.org/issues/show_bug.cgi?id=78117
http://bugzilla.gnome.org/show_bug.cgi?id=489490
Arguments:
- obj: the Accessible object
"""
parent = obj.parent
if parent and parent.getRole() in (pyatspi.ROLE_ROOT_PANE,
pyatspi.ROLE_DIALOG):
app = obj.getApplication()
for frame in app:
if frame.childCount < 1 \
or frame[0].getRole() not in (pyatspi.ROLE_ROOT_PANE,
pyatspi.ROLE_OPTION_PANE):
continue
root_pane = frame[0]
if obj in root_pane:
return root_pane
return parent
def findPreviousObject(self, obj):
"""Finds the object before this one."""
if not obj:
return None
for relation in obj.getRelationSet():
if relation.getRelationType() == pyatspi.RELATION_FLOWS_FROM:
return relation.getTarget(0)
index = obj.getIndexInParent() - 1
if not (0 <= index < obj.parent.childCount - 1):
obj = obj.parent
index = obj.getIndexInParent() - 1
try:
prevObj = obj.parent[index]
except:
prevObj = obj
return prevObj
def findNextObject(self, obj):
"""Finds the object after this one."""
if not obj:
return None
for relation in obj.getRelationSet():
if relation.getRelationType() == pyatspi.RELATION_FLOWS_TO:
return relation.getTarget(0)
index = obj.getIndexInParent() + 1
if not (0 < index < obj.parent.childCount):
obj = obj.parent
index = obj.getIndexInParent() + 1
try:
nextObj = obj.parent[index]
except:
nextObj = None
return nextObj
@staticmethod
def _flowsFromOrToSelection(obj):
try:
relationSet = obj.getRelationSet()
except:
return False
flows = [pyatspi.RELATION_FLOWS_FROM, pyatspi.RELATION_FLOWS_TO]
relations = filter(lambda r: r.getRelationType() in flows, relationSet)
targets = [r.getTarget(0) for r in relations]
for target in targets:
try:
nSelections = target.queryText().getNSelections()
except:
return False
if nSelections:
return True
return False
#########################################################################
# #
# Impress-Specific Utilities #
# #
#########################################################################
def drawingView(self, obj=orca_state.locusOfFocus):
"""Attempts to locate the Impress drawing view, which is the
area in which slide editing occurs."""
return pyatspi.findDescendant(self.topLevelObject(obj), self.isDrawingView)
def isDrawingView(self, obj):
"""Returns True if obj is the Impress Drawing View."""
if obj and obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
return (":" in obj.name and "/" in obj.name)
return False
def isInImpress(self, obj=orca_state.locusOfFocus):
"""Returns True if obj is in OOo Impress."""
# Having checked English, Spanish, and Arabic, it would seem
# that the Frame name will end with "Impress", unlocalized.
#
if obj:
topLevel = self.topLevelObject(obj)
if topLevel and not self.isZombie(topLevel) \
and topLevel.name.endswith("Impress"):
return True
return False
def slideAndTaskPanes(self, obj=orca_state.locusOfFocus):
"""Attempts to locate the Impress slide pane and task pane."""
drawingView = self.drawingView(obj)
if not drawingView:
return None, None
parent = drawingView.parent
if parent:
parent = parent.parent
if not parent:
return None, None
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_SPLIT_PANE
panes = pyatspi.findAllDescendants(parent, hasRole)
if not panes:
return None, None
slidePane = taskPane = None
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_DOCUMENT_FRAME
if pyatspi.findAllDescendants(panes[0], hasRole):
slidePane = panes[0]
if len(panes) == 2:
taskPane = panes[1]
else:
taskPane = panes[0]
if len(panes) == 2:
slidePane = panes[1]
return slidePane, taskPane
def slideTitleAndPosition(self, obj):
"""Attempts to obtain the title, position of the slide which contains
or is represented by obj.
Returns a (title, position, count) tuple.
"""
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
dv = obj
else:
dv = self.ancestorWithRole(obj, [pyatspi.ROLE_DOCUMENT_FRAME], [])
if not dv or not self.isDrawingView(dv):
return "", 0, 0
positionAndCount = dv.name.split(":")[1]
position, count = positionAndCount.split("/")
title = ""
for child in dv:
if not child.childCount:
continue
# We want an actual Title.
#
if child.name.startswith("ImpressTitle"):
title = self.displayedText(child[0])
break
# But we'll live with a Subtitle if we can't find a title.
# Unlike Titles, a single subtitle can be made up of multiple
# accessibles.
#
elif child.name.startswith("ImpressSubtitle"):
for line in child:
title = self.appendString(title, self.displayedText(line))
return title, int(position), int(count)
#########################################################################
# #
# Miscellaneous Utilities #
# #
#########################################################################
def isAutoTextEvent(self, event):
"""Returns True if event is associated with text being autocompleted
or autoinserted or autocorrected or autosomethingelsed.
Arguments:
- event: the accessible event being examined
"""
if event.source.getRole() != pyatspi.ROLE_PARAGRAPH:
return False
lastKey, mods = self.lastKeyAndModifiers()
if event.type.startswith("object:text-changed:insert"):
if not event.any_data:
return False
if lastKey == "Tab" and event.any_data != "\t":
return True
if lastKey in ["BackSpace", "ISO_Left_Tab"]:
return True
if event.type.startswith("focus:"):
if lastKey == "Return":
try:
charCount = event.source.queryText().characterCount
except:
charCount = 0
return charCount > 0
return False
def selectedChildren(self, obj):
if not obj:
return []
# Things only seem broken for certain tables, e.g. the Paths table.
# TODO - JD: File the LibreOffice bugs and reference them here.
if obj.getRole() != pyatspi.ROLE_TABLE \
or self.isSpreadSheetCell(obj):
return script_utilities.Utilities.selectedChildren(self, obj)
try:
selection = obj.querySelection()
except:
return []
children = []
for i, child in enumerate(obj):
if selection.isChildSelected(i):
children.append(obj[i])
return children
| chrys87/orca-beep | src/orca/scripts/apps/soffice/script_utilities.py | Python | lgpl-2.1 | 22,259 | 0.001483 |
from django.conf.urls import url, include
from rest_framework import routers
from api import views
router = routers.DefaultRouter(trailing_slash=True)
router.register(
r'sysusers', views.DjangoUserViewSet, base_name="sysusers")
router.register(
r'sysgroups', views.DjangoGroupViewSet, base_name="sysgroups")
router.register(
r'comment', views.CommentViewSet, base_name="comment")
router.register(
r'submission', views.SubmissionViewSet, base_name="submission")
router.register(
r'codescheme', views.CodeSchemeViewSet, base_name="codescheme")
router.register(
r'code', views.CodeViewSet, base_name="code")
router.register(
r'commentcodeinstance',
views.CommentCodeInstanceViewSet,
base_name="commentcodeinstance")
router.register(
r'assignment', views.AssignmentViewSet, base_name="assignment")
router.register(
r'commentthread', views.CommentThreadViewSet, base_name="commentthread")
router.register(
r'codedcommentthread',
views.CodedCommentThreadViewSet,
base_name="codedcommentthread")
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'', include(router.urls, namespace='api')),
url(r'', include(
'rest_framework.urls', namespace='rest_framework')),
]
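# With DefaultRouter, the registrations above generate the standard list/detail
# routes (e.g. "sysusers/", "sysusers/<pk>/", "comment/", ...) plus a browsable
# API root view. The final URL prefixes depend on where this urlconf is included
# by the project (an assumption, not shown in this file).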
| geosoco/reddit_coding | api/urls.py | Python | bsd-3-clause | 1,309 | 0 |
#!/usr/bin/env python3
""" Program that convert a pdf to a text file using Tesseract OCR.
The pdf file is first converted to a png file using ghostscript,
then the png file if processed by Tesseract.
"""
import os
import subprocess
import glob
import platform
import argparse
parser = argparse.ArgumentParser(description='Convert pdf files to txt files in the given folder.')
parser.add_argument('folder',
help='folder where the pdf files are stored')
args = parser.parse_args()
input_dic = vars(args)
print('Selected pdf folder: ',input_dic['folder'])
PDF_PATH = input_dic['folder']
#PDF_PATH = '/media/benjamin/Elements/pdfs/'
def png_to_txt(pngpath,short_name,txtpath,log_file):
""" Extract the text from a set of png files.
The png files associated to a single pdf file are numbered according to the page,
they share the same short_name.
"""
png_in = os.path.join(pngpath,short_name)
# Iterate over the pages of the document (different png files)
for pngfile in glob.glob(png_in+'*'):
path,filename = os.path.split(pngfile)
txtfile = filename[0:-4] #+'.txt'
txt_out = os.path.join(txtpath,txtfile)
try:
cmd_png2txt = 'tesseract '+ pngfile +' '+txt_out+ ' -l fra+eng'
proc_results = subprocess.run(cmd_png2txt.split(), stdout=subprocess.PIPE,timeout=60)
if proc_results.returncode:
print('Error encountered with file: {}\n'.format(filename))
with open(log_file, 'a') as logfile:
logfile.write('Error with file: {}\n'.format(filename)) # report errors
else:
print('Text extracted form file: {}'.format(filename))
except:
print('error extracting text with file {}'.format(filename))
with open(log_file, 'a') as logfile:
logfile.write('Error with file (exception raised): {}\n'.format(filename)) # report errors
def pdf_to_png(pdf_file,short_name,png_path,page_limit=4):
""" Convert the pdf to png, each page of the pdf gives a different png file."""
out_name = short_name+'.%d.png'
out_file = os.path.join(png_path,out_name)
if platform.system() == 'Windows':
cmd_pdf2png = ('gswin32c -dSAFER -dNOPAUSE -q -r300x300 -sDEVICE=pnggray -dBATCH -dLastPage=' + str(page_limit) +
' -sOutputFile=' + out_file + ' ' + pdf_file)
else:
cmd_pdf2png = ('gs -dSAFER -dNOPAUSE -q -r300x300 -sDEVICE=pnggray -dBATCH -dLastPage=' + str(page_limit) +
' -sOutputFile=' + out_file + ' ' + pdf_file)
proc_results = subprocess.run(cmd_pdf2png.split(), stdout=subprocess.PIPE,timeout=60)
return proc_results
#PDF_PATH = '/media/benjamin/Elements/pdfs/'
LOG_FILE1 = 'logfile_pdf2png.txt'
LOG_FILE2 = 'logfile_png2txt.txt'
# initiate log file to report errors
with open(LOG_FILE1, 'a') as logfile:
logfile.write('Logfile produced by pdf2txt.py\n')
with open(LOG_FILE2, 'a') as logfile:
logfile.write('Logfile produced by pdf2txt.py\n')
# init paths
png_path = os.path.join(PDF_PATH,'png')
txt_path = os.path.join(PDF_PATH,'txt')
if not os.path.exists(png_path):
os.makedirs(png_path)
if not os.path.exists(txt_path):
os.makedirs(txt_path)
# Loop over all the file in the pdf folder
nb_files = len(list(glob.glob(os.path.join(PDF_PATH,'*.pdf'))))
for idx,pdf_file in enumerate(glob.glob(os.path.join(PDF_PATH,'*.pdf'))):
pdf_path,filename = os.path.split(pdf_file)
print('processing {}. File {}/{}.'.format(filename,idx+1,nb_files))
short_name = filename[0:-4]
try:
proc_results = pdf_to_png(pdf_file,short_name,png_path,page_limit=4)
if proc_results.returncode:
print('Error encountered with file: {}\n'.format(filename))
with open(LOG_FILE1, 'a') as logfile:
logfile.write('Error with file: {}\n'.format(filename)) # report errors
else:
png_to_txt(png_path,short_name,txt_path,LOG_FILE2)
except subprocess.TimeoutExpired:
print('!!!!!! Timed out for file {} !!!!!!'.format(filename))
with open(LOG_FILE1, 'a') as logfile:
logfile.write('Timed out with file: {}\n'.format(filename)) # report time out
| bricaud/OCR-classif | pdf2txt.py | Python | apache-2.0 | 3,948 | 0.029889 |
#coding=utf-8
import sys
import os
import time
from shutil import *
def backup_db(dirname):
    assert os.path.isdir(dirname), "not a directory: %s" % dirname
for root,dirs,filenames in os.walk(dirname):
#print root,dirs,filenames
for filename in filenames:
filename = os.path.join(root,filename)
if filename.endswith(".rdb") or filename.endswith("aof"):
now=time.strftime("%Y%m%d%H%M%S",time.localtime())
backup_filename = "%s_%s.bak" % (filename,now)
tmp_filename = backup_filename + ".tmp"
copy2(filename,tmp_filename) # preserve attr
copy2(tmp_filename,backup_filename)
os.remove(tmp_filename)
break
if __name__ == "__main__":
if len(sys.argv) != 2:
print "backup_db arguments != 2"
exit(0)
dirname = sys.argv[1]
backup_db(dirname)
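# Example usage (a sketch; the directory is a placeholder and should contain the
# .rdb/.aof files to back up, e.g. a Redis data directory):
#
#   python backup_db.py /var/lib/redis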
| sundream/shell | backup_db.py | Python | gpl-2.0 | 896 | 0.014509 |
import logging
import socket
import ssl
import struct
import warnings
import zlib
import io
import os
import platform
from functools import wraps
from threading import local as thread_local
from .rencode import dumps, loads
DEFAULT_LINUX_CONFIG_DIR_PATH = '~/.config/deluge'
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
MESSAGE_HEADER_SIZE = 5
READ_SIZE = 10
logger = logging.getLogger(__name__)
class DelugeClientException(Exception):
"""Base exception for all deluge client exceptions"""
class ConnectionLostException(DelugeClientException):
pass
class CallTimeoutException(DelugeClientException):
pass
class InvalidHeaderException(DelugeClientException):
pass
class FailedToReconnectException(DelugeClientException):
pass
class RemoteException(DelugeClientException):
pass
class DelugeRPCClient(object):
timeout = 20
def __init__(self, host, port, username, password, decode_utf8=False, automatic_reconnect=True):
self.host = host
self.port = port
self.username = username
self.password = password
self.deluge_version = None
# This is only applicable if deluge_version is 2
self.deluge_protocol_version = None
self.decode_utf8 = decode_utf8
if not self.decode_utf8:
            warnings.warn('Using `decode_utf8=False` is deprecated, please set it to True. '
                          'The argument will be removed in a future release where it will always be True', DeprecationWarning)
self.automatic_reconnect = automatic_reconnect
self.request_id = 1
self.connected = False
self._create_socket()
def _create_socket(self, ssl_version=None):
if ssl_version is not None:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), ssl_version=ssl_version)
else:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self._socket.settimeout(self.timeout)
def connect(self):
"""
Connects to the Deluge instance.
"""
self._connect()
logger.debug('Connected to Deluge, detecting daemon version')
self._detect_deluge_version()
logger.debug('Daemon version {} detected, logging in'.format(self.deluge_version))
if self.deluge_version == 2:
result = self.call('daemon.login', self.username, self.password, client_version='deluge-client')
else:
result = self.call('daemon.login', self.username, self.password)
logger.debug('Logged in with value %r' % result)
self.connected = True
def _connect(self):
logger.info('Connecting to %s:%s' % (self.host, self.port))
try:
self._socket.connect((self.host, self.port))
except ssl.SSLError as e:
# Note: have not verified that we actually get errno 258 for this error
if (hasattr(ssl, 'PROTOCOL_SSLv3') and
(getattr(e, 'reason', None) == 'UNSUPPORTED_PROTOCOL' or e.errno == 258)):
logger.warning('Was unable to ssl handshake, trying to force SSLv3 (insecure)')
self._create_socket(ssl_version=ssl.PROTOCOL_SSLv3)
self._socket.connect((self.host, self.port))
else:
raise
def disconnect(self):
"""
Disconnect from deluge
"""
if self.connected:
self._socket.close()
self._socket = None
self.connected = False
def _detect_deluge_version(self):
if self.deluge_version is not None:
return
self._send_call(1, None, 'daemon.info')
self._send_call(2, None, 'daemon.info')
self._send_call(2, 1, 'daemon.info')
result = self._socket.recv(1)
if result[:1] == b'D':
# This is a protocol deluge 2.0 was using before release
self.deluge_version = 2
self.deluge_protocol_version = None
# If we need the specific version of deluge 2, this is it.
daemon_version = self._receive_response(2, None, partial_data=result)
elif ord(result[:1]) == 1:
self.deluge_version = 2
self.deluge_protocol_version = 1
# If we need the specific version of deluge 2, this is it.
daemon_version = self._receive_response(2, 1, partial_data=result)
else:
self.deluge_version = 1
# Deluge 1 doesn't recover well from the bad request. Re-connect the socket.
self._socket.close()
self._create_socket()
self._connect()
def _send_call(self, deluge_version, protocol_version, method, *args, **kwargs):
self.request_id += 1
if method == 'daemon.login':
debug_args = list(args)
if len(debug_args) >= 2:
debug_args[1] = '<password hidden>'
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, debug_args, kwargs))
else:
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, args, kwargs))
req = ((self.request_id, method, args, kwargs), )
req = zlib.compress(dumps(req))
if deluge_version == 2:
if protocol_version is None:
# This was a protocol for deluge 2 before they introduced protocol version numbers
self._socket.send(b'D' + struct.pack("!i", len(req)))
elif protocol_version == 1:
self._socket.send(struct.pack('!BI', protocol_version, len(req)))
else:
raise Exception('Deluge protocol version {} is not (yet) supported.'.format(protocol_version))
self._socket.send(req)
def _receive_response(self, deluge_version, protocol_version, partial_data=b''):
expected_bytes = None
data = partial_data
while True:
try:
d = self._socket.recv(READ_SIZE)
except ssl.SSLError:
raise CallTimeoutException()
data += d
if deluge_version == 2:
if expected_bytes is None:
                    if len(data) < MESSAGE_HEADER_SIZE:
continue
header = data[:MESSAGE_HEADER_SIZE]
data = data[MESSAGE_HEADER_SIZE:]
if protocol_version is None:
if header[0] != b'D'[0]:
raise InvalidHeaderException('Expected D as first byte in reply')
elif ord(header[:1]) != protocol_version:
raise InvalidHeaderException(
'Expected protocol version ({}) as first byte in reply'.format(protocol_version)
)
if protocol_version is None:
expected_bytes = struct.unpack('!i', header[1:])[0]
else:
expected_bytes = struct.unpack('!I', header[1:])[0]
if len(data) >= expected_bytes:
data = zlib.decompress(data)
break
else:
try:
data = zlib.decompress(data)
except zlib.error:
if not d:
raise ConnectionLostException()
continue
break
data = list(loads(data, decode_utf8=self.decode_utf8))
msg_type = data.pop(0)
request_id = data.pop(0)
if msg_type == RPC_ERROR:
if self.deluge_version == 2:
exception_type, exception_msg, _, traceback = data
# On deluge 2, exception arguments are sent as tuple
if self.decode_utf8:
exception_msg = ', '.join(exception_msg)
else:
exception_msg = b', '.join(exception_msg)
else:
exception_type, exception_msg, traceback = data[0]
if self.decode_utf8:
exception = type(str(exception_type), (RemoteException, ), {})
exception_msg = '%s\n%s' % (exception_msg,
traceback)
else:
exception = type(str(exception_type.decode('utf-8', 'ignore')), (RemoteException, ), {})
exception_msg = '%s\n%s' % (exception_msg.decode('utf-8', 'ignore'),
traceback.decode('utf-8', 'ignore'))
raise exception(exception_msg)
elif msg_type == RPC_RESPONSE:
retval = data[0]
return retval
def reconnect(self):
"""
Reconnect
"""
self.disconnect()
self._create_socket()
self.connect()
def call(self, method, *args, **kwargs):
"""
Calls an RPC function
"""
tried_reconnect = False
for _ in range(2):
try:
self._send_call(self.deluge_version, self.deluge_protocol_version, method, *args, **kwargs)
return self._receive_response(self.deluge_version, self.deluge_protocol_version)
except (socket.error, ConnectionLostException, CallTimeoutException):
if self.automatic_reconnect:
if tried_reconnect:
raise FailedToReconnectException()
else:
try:
self.reconnect()
except (socket.error, ConnectionLostException, CallTimeoutException):
raise FailedToReconnectException()
tried_reconnect = True
else:
raise
def __getattr__(self, item):
return RPCCaller(self.call, item)
def __enter__(self):
"""Connect to client while using with statement."""
self.connect()
return self
def __exit__(self, type, value, traceback):
"""Disconnect from client at end of with statement."""
self.disconnect()
class RPCCaller(object):
def __init__(self, caller, method=''):
self.caller = caller
self.method = method
def __getattr__(self, item):
return RPCCaller(self.caller, self.method+'.'+item)
def __call__(self, *args, **kwargs):
return self.caller(self.method, *args, **kwargs)
class LocalDelugeRPCClient(DelugeRPCClient):
"""Client with auto discovery for the default local credentials"""
def __init__(
self,
host='127.0.0.1',
port=58846,
username='',
password='',
decode_utf8=True,
automatic_reconnect=True
):
if (
host in ('localhost', '127.0.0.1', '::1') and
not username and not password
):
username, password = self._get_local_auth()
super(LocalDelugeRPCClient, self).__init__(
host, port, username, password, decode_utf8, automatic_reconnect
)
def _cache_thread_local(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(wrapper.cache, 'result'):
wrapper.cache.result = func(*args, **kwargs)
return wrapper.cache.result
wrapper.cache = thread_local()
return wrapper
@_cache_thread_local
def _get_local_auth(self):
auth_path = local_username = local_password = ''
os_family = platform.system()
if 'Windows' in os_family or 'CYGWIN' in os_family:
app_data_path = os.environ.get('APPDATA')
auth_path = os.path.join(app_data_path, 'deluge', 'auth')
elif 'Linux' in os_family:
config_path = os.path.expanduser(DEFAULT_LINUX_CONFIG_DIR_PATH)
auth_path = os.path.join(config_path, 'auth')
if os.path.exists(auth_path):
with io.open(auth_path, 'r', encoding='utf-8') as f:
for line in f:
if not line or line.startswith('#'):
continue
auth_data = line.split(':')
if len(auth_data) < 2:
continue
username, password = auth_data[:2]
if username == 'localclient':
local_username, local_password = username, password
break
return local_username, local_password
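# Minimal usage sketch (host and credentials are placeholders; "core.get_torrents_status"
# is a standard Deluge core RPC, but the methods actually available depend on the daemon):
#
#   client = DelugeRPCClient('127.0.0.1', 58846, 'username', 'password', decode_utf8=True)
#   client.connect()
#   torrents = client.call('core.get_torrents_status', {}, ['name', 'progress'])
#   client.disconnect()
#
# LocalDelugeRPCClient() can be used instead to pick up the local daemon's
# credentials from its auth file automatically.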
| pymedusa/Medusa | ext/deluge_client/client.py | Python | gpl-3.0 | 12,558 | 0.00223 |
# Generated by Django 2.1.7 on 2019-04-30 13:20
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='publishablemodel',
name='id',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
]
| flavoi/diventi | diventi/core/migrations/0002_auto_20190430_1520.py | Python | apache-2.0 | 446 | 0.002242 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
A utility to make a best guess if a person is alive. This is used to provide
privacy in reports and exports.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".gen.utils.alive")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..display.name import displayer as name_displayer
from ..lib.date import Date, Today
from ..errors import DatabaseError
from ..ggettext import sgettext as _
#-------------------------------------------------------------------------
#
# Constants from config .ini keys
#
#-------------------------------------------------------------------------
# cache values; use refresh_constants() if they change
try:
from ..config import config
_MAX_AGE_PROB_ALIVE = config.get('behavior.max-age-prob-alive')
_MAX_SIB_AGE_DIFF = config.get('behavior.max-sib-age-diff')
_AVG_GENERATION_GAP = config.get('behavior.avg-generation-gap')
except ImportError:
# Utils used as module not part of GRAMPS
_MAX_AGE_PROB_ALIVE = 110
_MAX_SIB_AGE_DIFF = 20
_AVG_GENERATION_GAP = 20
#-------------------------------------------------------------------------
#
# ProbablyAlive class
#
#-------------------------------------------------------------------------
class ProbablyAlive(object):
"""
An object to hold the parameters for considering someone alive.
"""
def __init__(self,
db,
max_sib_age_diff=None,
max_age_prob_alive=None,
avg_generation_gap=None):
self.db = db
if max_sib_age_diff is None:
max_sib_age_diff = _MAX_SIB_AGE_DIFF
if max_age_prob_alive is None:
max_age_prob_alive = _MAX_AGE_PROB_ALIVE
if avg_generation_gap is None:
avg_generation_gap = _AVG_GENERATION_GAP
self.MAX_SIB_AGE_DIFF = max_sib_age_diff
self.MAX_AGE_PROB_ALIVE = max_age_prob_alive
self.AVG_GENERATION_GAP = avg_generation_gap
def probably_alive_range(self, person, is_spouse=False):
# FIXME: some of these computed dates need to be a span. For
# example, if a person could be born +/- 20 yrs around
# a date then it should be a span, and yr_offset should
# deal with it as well ("between 1920 and 1930" + 10 =
# "between 1930 and 1940")
if person is None:
return (None, None, "", None)
birth_ref = person.get_birth_ref()
death_ref = person.get_death_ref()
death_date = None
birth_date = None
explain = ""
# If the recorded death year is before current year then
# things are simple.
if death_ref and death_ref.get_role().is_primary():
if death_ref:
death = self.db.get_event_from_handle(death_ref.ref)
if death and death.get_date_object().get_start_date() != Date.EMPTY:
death_date = death.get_date_object()
# Look for Cause Of Death, Burial or Cremation events.
# These are fairly good indications that someone's not alive.
if not death_date:
for ev_ref in person.get_primary_event_ref_list():
if ev_ref:
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_death_fallback():
death_date = ev.get_date_object()
explain = _("death-related evidence")
# If they were born within X years before current year then
# assume they are alive (we already know they are not dead).
if not birth_date:
if birth_ref and birth_ref.get_role().is_primary():
birth = self.db.get_event_from_handle(birth_ref.ref)
if birth and birth.get_date_object().get_start_date() != Date.EMPTY:
birth_date = birth.get_date_object()
# Look for Baptism, etc events.
# These are fairly good indications that someone's birth.
if not birth_date:
for ev_ref in person.get_primary_event_ref_list():
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_birth_fallback():
birth_date = ev.get_date_object()
explain = _("birth-related evidence")
if not birth_date and death_date:
            # no birth date: estimate it as MAX years before the death date
birth_date = death_date.copy_offset_ymd(year=-self.MAX_AGE_PROB_ALIVE)
explain = _("death date")
if not death_date and birth_date:
            # no death date: estimate it as MAX years after the birth date
death_date = birth_date.copy_offset_ymd(year=self.MAX_AGE_PROB_ALIVE)
explain = _("birth date")
if death_date and birth_date:
return (birth_date, death_date, explain, person) # direct self evidence
# Neither birth nor death events are available. Try looking
# at siblings. If a sibling was born more than X years past,
# or more than Z future, then probably this person is
# not alive. If the sibling died more than X years
# past, or more than X years future, then probably not alive.
family_list = person.get_parent_family_handle_list()
for family_handle in family_list:
family = self.db.get_family_from_handle(family_handle)
if family is None:
continue
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.db.get_person_from_handle(child_handle)
if child is None:
continue
# Go through once looking for direct evidence:
for ev_ref in child.get_primary_event_ref_list():
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_birth():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
# if sibling birth date too far away, then not alive:
year = dobj.get_year()
if year != 0:
# sibling birth date
return (Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF),
Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF + self.MAX_AGE_PROB_ALIVE),
_("sibling birth date"),
child)
elif ev and ev.type.is_death():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
# if sibling death date too far away, then not alive:
year = dobj.get_year()
if year != 0:
# sibling death date
return (Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF - self.MAX_AGE_PROB_ALIVE),
Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF - self.MAX_AGE_PROB_ALIVE
+ self.MAX_AGE_PROB_ALIVE),
_("sibling death date"),
child)
# Go through again looking for fallback:
for ev_ref in child.get_primary_event_ref_list():
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_birth_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
# if sibling birth date too far away, then not alive:
year = dobj.get_year()
if year != 0:
# sibling birth date
return (Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF),
Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF + self.MAX_AGE_PROB_ALIVE),
_("sibling birth-related date"),
child)
elif ev and ev.type.is_death_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
# if sibling death date too far away, then not alive:
year = dobj.get_year()
if year != 0:
# sibling death date
return (Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF - self.MAX_AGE_PROB_ALIVE),
Date().copy_ymd(year - self.MAX_SIB_AGE_DIFF - self.MAX_AGE_PROB_ALIVE + self.MAX_AGE_PROB_ALIVE),
_("sibling death-related date"),
child)
if not is_spouse: # if you are not in recursion, let's recurse:
for family_handle in person.get_family_handle_list():
family = self.db.get_family_from_handle(family_handle)
if family:
mother_handle = family.get_mother_handle()
father_handle = family.get_father_handle()
if mother_handle == person.handle and father_handle:
father = self.db.get_person_from_handle(father_handle)
date1, date2, explain, other = self.probably_alive_range(father, is_spouse=True)
if date1 and date2:
return date1, date2, _("a spouse, ") + explain, other
elif father_handle == person.handle and mother_handle:
mother = self.db.get_person_from_handle(mother_handle)
date1, date2, explain, other = self.probably_alive_range(mother, is_spouse=True)
if date1 and date2:
return date1, date2, _("a spouse, ") + explain, other
# Let's check the family events and see if we find something
for ref in family.get_event_ref_list():
if ref:
event = self.db.get_event_from_handle(ref.ref)
if event:
date = event.get_date_object()
year = date.get_year()
if year != 0:
other = None
if person.handle == mother_handle and father_handle:
other = self.db.get_person_from_handle(father_handle)
elif person.handle == father_handle and mother_handle:
other = self.db.get_person_from_handle(mother_handle)
return (Date().copy_ymd(year - self.AVG_GENERATION_GAP),
Date().copy_ymd(year - self.AVG_GENERATION_GAP +
self.MAX_AGE_PROB_ALIVE),
_("event with spouse"), other)
# Try looking for descendants that were born more than a lifespan
# ago.
def descendants_too_old (person, years):
for family_handle in person.get_family_handle_list():
family = self.db.get_family_from_handle(family_handle)
if not family:
# can happen with LivingProxyDb(PrivateProxyDb(db))
continue
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.db.get_person_from_handle(child_handle)
child_birth_ref = child.get_birth_ref()
if child_birth_ref:
child_birth = self.db.get_event_from_handle(child_birth_ref.ref)
dobj = child_birth.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
d = Date(dobj)
val = d.get_start_date()
val = d.get_year() - years
d.set_year(val)
return (d, d.copy_offset_ymd(self.MAX_AGE_PROB_ALIVE),
_("descendant birth date"),
child)
child_death_ref = child.get_death_ref()
if child_death_ref:
child_death = self.db.get_event_from_handle(child_death_ref.ref)
dobj = child_death.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- self.AVG_GENERATION_GAP),
dobj.copy_offset_ymd(- self.AVG_GENERATION_GAP + self.MAX_AGE_PROB_ALIVE),
_("descendant death date"),
child)
date1, date2, explain, other = descendants_too_old (child, years + self.AVG_GENERATION_GAP)
if date1 and date2:
return date1, date2, explain, other
# Check fallback data:
for ev_ref in child.get_primary_event_ref_list():
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_birth_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
d = Date(dobj)
val = d.get_start_date()
val = d.get_year() - years
d.set_year(val)
return (d, d.copy_offset_ymd(self.MAX_AGE_PROB_ALIVE),
_("descendant birth-related date"),
child)
elif ev and ev.type.is_death_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- self.AVG_GENERATION_GAP),
dobj.copy_offset_ymd(- self.AVG_GENERATION_GAP + self.MAX_AGE_PROB_ALIVE),
_("descendant death-related date"),
child)
return (None, None, "", None)
# If there are descendants that are too old for the person to have
# been alive in the current year then they must be dead.
date1, date2, explain, other = None, None, "", None
try:
date1, date2, explain, other = descendants_too_old(person, self.AVG_GENERATION_GAP)
except RuntimeError:
raise DatabaseError(
_("Database error: %s is defined as his or her own ancestor") %
name_displayer.display(person))
if date1 and date2:
return (date1, date2, explain, other)
def ancestors_too_old(person, year):
family_handle = person.get_main_parents_family_handle()
if family_handle:
family = self.db.get_family_from_handle(family_handle)
if not family:
# can happen with LivingProxyDb(PrivateProxyDb(db))
return (None, None, "", None)
father_handle = family.get_father_handle()
if father_handle:
father = self.db.get_person_from_handle(father_handle)
father_birth_ref = father.get_birth_ref()
if father_birth_ref and father_birth_ref.get_role().is_primary():
father_birth = self.db.get_event_from_handle(
father_birth_ref.ref)
dobj = father_birth.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year),
dobj.copy_offset_ymd(- year + self.MAX_AGE_PROB_ALIVE),
_("ancestor birth date"),
father)
father_death_ref = father.get_death_ref()
if father_death_ref and father_death_ref.get_role().is_primary():
father_death = self.db.get_event_from_handle(
father_death_ref.ref)
dobj = father_death.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE),
dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE + self.MAX_AGE_PROB_ALIVE),
_("ancestor death date"),
father)
# Check fallback data:
for ev_ref in father.get_primary_event_ref_list():
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_birth_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year),
dobj.copy_offset_ymd(- year + self.MAX_AGE_PROB_ALIVE),
_("ancestor birth-related date"),
father)
elif ev and ev.type.is_death_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE),
dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE + self.MAX_AGE_PROB_ALIVE),
_("ancestor death-related date"),
father)
date1, date2, explain, other = ancestors_too_old (father, year - self.AVG_GENERATION_GAP)
if date1 and date2:
return date1, date2, explain, other
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.db.get_person_from_handle(mother_handle)
mother_birth_ref = mother.get_birth_ref()
if mother_birth_ref and mother_birth_ref.get_role().is_primary():
mother_birth = self.db.get_event_from_handle(mother_birth_ref.ref)
dobj = mother_birth.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year),
dobj.copy_offset_ymd(- year + self.MAX_AGE_PROB_ALIVE),
_("ancestor birth date"),
mother)
mother_death_ref = mother.get_death_ref()
if mother_death_ref and mother_death_ref.get_role().is_primary():
mother_death = self.db.get_event_from_handle(
mother_death_ref.ref)
dobj = mother_death.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE),
dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE + self.MAX_AGE_PROB_ALIVE),
_("ancestor death date"),
mother)
# Check fallback data:
for ev_ref in mother.get_primary_event_ref_list():
ev = self.db.get_event_from_handle(ev_ref.ref)
if ev and ev.type.is_birth_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year),
dobj.copy_offset_ymd(- year + self.MAX_AGE_PROB_ALIVE),
_("ancestor birth-related date"),
mother)
elif ev and ev.type.is_death_fallback():
dobj = ev.get_date_object()
if dobj.get_start_date() != Date.EMPTY:
return (dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE),
dobj.copy_offset_ymd(- year - self.MAX_AGE_PROB_ALIVE + self.MAX_AGE_PROB_ALIVE),
_("ancestor death-related date"),
mother)
date1, date2, explain, other = ancestors_too_old (mother, year - self.AVG_GENERATION_GAP)
if date1 and date2:
return (date1, date2, explain, other)
return (None, None, "", None)
# If there are ancestors that would be too old in the current year
# then assume our person must be dead too.
date1, date2, explain, other = ancestors_too_old (person, - self.AVG_GENERATION_GAP)
if date1 and date2:
return (date1, date2, explain, other)
# If we can't find any reason to believe that they are dead we
# must assume they are alive.
return (None, None, "", None)
#-------------------------------------------------------------------------
#
# probably_alive
#
#-------------------------------------------------------------------------
def probably_alive(person, db,
current_date=None,
limit=0,
max_sib_age_diff=None,
max_age_prob_alive=None,
avg_generation_gap=None,
return_range=False):
"""
Return true if the person may be alive on current_date.
This works by a process of elimination. If we can't find a good
reason to believe that someone is dead then we assume they must
be alive.
:param current_date: a date object that is not estimated or modified
(defaults to today)
:param limit: number of years to check beyond death_date
:param max_sib_age_diff: maximum sibling age difference, in years
:param max_age_prob_alive: maximum age of a person, in years
:param avg_generation_gap: average generation gap, in years
"""
# First, get the real database to use all people
# for determining alive status:
basedb = db.basedb
# Now, we create a wrapper for doing work:
pb = ProbablyAlive(basedb, max_sib_age_diff,
max_age_prob_alive,
avg_generation_gap)
birth, death, explain, relative = pb.probably_alive_range(person)
if current_date is None:
current_date = Today()
    if not birth or not death:
        # no evidence, must consider alive
        if return_range:
            return (True, None, None, _("no evidence"), None)
        else:
            return True
# must have dates from here:
if limit:
death += limit # add these years to death
# Finally, check to see if current_date is between dates
result = (current_date.match(birth, ">=") and
current_date.match(death, "<="))
if return_range:
return (result, birth, death, explain, relative)
else:
return result
def probably_alive_range(person, db,
max_sib_age_diff=None,
max_age_prob_alive=None,
avg_generation_gap=None):
"""
Computes estimated birth and death dates.
Returns: (birth_date, death_date, explain_text, related_person)
"""
# First, find the real database to use all people
# for determining alive status:
from ..proxy.proxybase import ProxyDbBase
basedb = db
while isinstance(basedb, ProxyDbBase):
basedb = basedb.db
# Now, we create a wrapper for doing work:
pb = ProbablyAlive(basedb, max_sib_age_diff,
max_age_prob_alive, avg_generation_gap)
return pb.probably_alive_range(person)
def update_constants():
"""
Used to update the constants that are cached in this module.
"""
from ..config import config
global _MAX_AGE_PROB_ALIVE, _MAX_SIB_AGE_DIFF, _AVG_GENERATION_GAP
_MAX_AGE_PROB_ALIVE = config.get('behavior.max-age-prob-alive')
_MAX_SIB_AGE_DIFF = config.get('behavior.max-sib-age-diff')
_AVG_GENERATION_GAP = config.get('behavior.avg-generation-gap')
| arunkgupta/gramps | gramps/gen/utils/alive.py | Python | gpl-2.0 | 26,674 | 0.005436 |
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, Ice, Test
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def allTests(communicator):
sys.stdout.write("testing stringToProxy... ")
sys.stdout.flush()
base = communicator.stringToProxy("test:default -p 12010")
test(base)
print("ok")
sys.stdout.write("testing checked cast... ")
sys.stdout.flush()
obj = Test.TestIntfPrx.checkedCast(base)
test(obj)
test(obj == base)
print("ok")
sys.stdout.write("creating/destroying/recreating object adapter... ")
sys.stdout.flush()
adapter = communicator.createObjectAdapterWithEndpoints("TransientTestAdapter", "default")
try:
communicator.createObjectAdapterWithEndpoints("TransientTestAdapter", "default")
test(False)
except Ice.LocalException:
pass
adapter.destroy()
adapter = communicator.createObjectAdapterWithEndpoints("TransientTestAdapter", "default")
adapter.destroy()
print("ok")
sys.stdout.write("creating/activating/deactivating object adapter in one operation... ")
sys.stdout.flush()
obj.transient()
print("ok")
sys.stdout.write("deactivating object adapter in the server... ")
sys.stdout.flush()
obj.deactivate()
print("ok")
sys.stdout.write("testing connection closure... ");
sys.stdout.flush();
for x in range(10):
initData = Ice.InitializationData();
initData.properties = communicator.getProperties().clone();
comm = Ice.initialize(initData);
comm.stringToProxy("test:default -p 12010").ice_pingAsync();
comm.destroy();
print("ok");
sys.stdout.write("testing whether server is gone... ")
sys.stdout.flush()
try:
obj.ice_timeout(100).ice_ping() # Use timeout to speed up testing on Windows
test(False)
except Ice.LocalException:
print("ok")
return obj
| ljx0305/ice | python/test/Ice/adapterDeactivation/AllTests.py | Python | gpl-2.0 | 2,229 | 0.007627 |
import unittest
import zope.component
from zExceptions import Unauthorized
from Products.PloneTestCase import ptc
from Products.PloneTestCase.ptc import default_user
from pmr2.oauth.interfaces import ITokenManager, IConsumerManager
from pmr2.oauth.interfaces import IScopeManager
from pmr2.oauth.token import Token
from pmr2.oauth.consumer import Consumer
from pmr2.oauth.browser import consumer
from pmr2.oauth.browser import token
from pmr2.oauth.browser import user
from pmr2.oauth.tests.base import TestRequest
class FormTestCase(ptc.PloneTestCase):
"""
Testing functionalities of forms that don't fit well into doctests.
"""
def afterSetUp(self):
request = TestRequest()
self.consumerManager = zope.component.getMultiAdapter(
(self.portal, request), IConsumerManager)
self.consumer = Consumer('consumer.example.com', 'consumer-secret')
self.consumerManager.add(self.consumer)
self.tokenManager = zope.component.getMultiAdapter(
(self.portal, request), ITokenManager)
self.scopeManager = zope.component.getMultiAdapter(
(self.portal, request), IScopeManager)
self.reqtoken = self.tokenManager.generateRequestToken(
self.consumer.key, 'oob')
self.scopeManager.requestScope(self.reqtoken.key, None)
def test_0000_authform_render(self):
request = TestRequest(form={
'oauth_token': self.reqtoken.key,
})
form = token.AuthorizeTokenForm(self.portal, request)
form.update()
result = form.render()
self.assertTrue('_authenticator' in result)
def test_0001_authform_post_authfail(self):
request = TestRequest(form={
'oauth_token': self.reqtoken.key,
'form.buttons.approve': 1,
})
# simulate lack of CSRF
request.form['_authenticator'] = None
form = token.AuthorizeTokenForm(self.portal, request)
self.assertRaises(Unauthorized, form.update)
def test_0002_authform_post_authgood(self):
request = TestRequest(form={
'oauth_token': self.reqtoken.key,
'form.buttons.approve': 1,
})
form = token.AuthorizeTokenForm(self.portal, request)
form.update()
result = form.render()
self.assertTrue(self.reqtoken.verifier in result)
def test_1000_consumermanageform_fail(self):
request = TestRequest(form={
'form.buttons.remove': 1,
})
request.form['_authenticator'] = None
form = consumer.ConsumerManageForm(self.portal, request)
self.assertRaises(Unauthorized, form.update)
def test_2000_usertokenform_fail(self):
# have to add a token to show the button.
atok = self.tokenManager._generateBaseToken(self.consumer.key)
atok.access = True
atok.user = default_user
self.tokenManager.add(atok)
request = TestRequest(form={
'form.buttons.revoke': 1,
})
request.form['_authenticator'] = None
form = user.UserTokenForm(self.portal, request)
self.assertRaises(Unauthorized, form.update)
def test_2100_usertokenform_revoke(self):
# have to add a token to show the button.
atok = self.tokenManager._generateBaseToken(self.consumer.key)
atok.access = True
atok.user = default_user
self.tokenManager.add(atok)
self.login(default_user)
request = TestRequest()
form = user.UserTokenForm(self.portal, request)
result = form()
self.assertTrue(atok.key in result)
self.assertTrue('Revoke' in result)
request = TestRequest(form={
'form.widgets.key': [atok.key],
'form.buttons.revoke': 1,
})
form = user.UserTokenForm(self.portal, request)
result = form()
self.assertFalse(atok.key in result)
# Ideally this would not be rendered, but it is, due to how the
# button and handler are coupled together. If the button is not
# available the action wouldn't be executed, which would have
# meant that the token wouldn't be revoked...
# This whole issue can probably be sidestepped with a redirect.
# self.assertFalse('Revoke' in result)
def test_2200_usertokenform_no_token_no_button(self):
# have to add a token to show the button.
request = TestRequest()
form = user.UserTokenForm(self.portal, request)
self.assertFalse('Revoke' in form())
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(FormTestCase))
return suite
| PMR2/pmr2.oauth | pmr2/oauth/tests/test_form.py | Python | gpl-2.0 | 4,711 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-21 22:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='skill',
name='level',
field=models.CharField(choices=[('beginner', 'Beginner'), ('intermediate', 'Intermediate'), ('advanced', 'Advanced'), ('expert', 'Expert')], max_length=50, verbose_name="What's your level?"),
),
]
| devolio/devolio | users/migrations/0002_auto_20170321_2209.py | Python | gpl-3.0 | 589 | 0.001698 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib
from urllib import unquote
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common import utils, exceptions
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, HTTPPreconditionFailed
MAX_FILE_SIZE = 5368709122
MAX_META_NAME_LENGTH = 128
MAX_META_VALUE_LENGTH = 256
MAX_META_COUNT = 90
MAX_META_OVERALL_SIZE = 4096
MAX_HEADER_SIZE = 8192
MAX_OBJECT_NAME_LENGTH = 1024
CONTAINER_LISTING_LIMIT = 10000
ACCOUNT_LISTING_LIMIT = 10000
MAX_ACCOUNT_NAME_LENGTH = 256
MAX_CONTAINER_NAME_LENGTH = 256
# If adding an entry to DEFAULT_CONSTRAINTS, note that
# these constraints are automatically published by the
# proxy server in responses to /info requests, with values
# updated by reload_constraints()
DEFAULT_CONSTRAINTS = {
'max_file_size': MAX_FILE_SIZE,
'max_meta_name_length': MAX_META_NAME_LENGTH,
'max_meta_value_length': MAX_META_VALUE_LENGTH,
'max_meta_count': MAX_META_COUNT,
'max_meta_overall_size': MAX_META_OVERALL_SIZE,
'max_header_size': MAX_HEADER_SIZE,
'max_object_name_length': MAX_OBJECT_NAME_LENGTH,
'container_listing_limit': CONTAINER_LISTING_LIMIT,
'account_listing_limit': ACCOUNT_LISTING_LIMIT,
'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH,
'max_container_name_length': MAX_CONTAINER_NAME_LENGTH,
}
SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {} # any constraints overridden by SWIFT_CONF_FILE
EFFECTIVE_CONSTRAINTS = {} # populated by reload_constraints
def reload_constraints():
"""
    Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
    populating OVERRIDE_CONSTRAINTS and EFFECTIVE_CONSTRAINTS along the way.
"""
global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {}
constraints_conf = ConfigParser()
if constraints_conf.read(utils.SWIFT_CONF_FILE):
SWIFT_CONSTRAINTS_LOADED = True
for name in DEFAULT_CONSTRAINTS:
try:
value = int(constraints_conf.get('swift-constraints', name))
except NoOptionError:
pass
except NoSectionError:
# We are never going to find the section for another option
break
else:
OVERRIDE_CONSTRAINTS[name] = value
for name, default in DEFAULT_CONSTRAINTS.items():
value = OVERRIDE_CONSTRAINTS.get(name, default)
EFFECTIVE_CONSTRAINTS[name] = value
# "globals" in this context is module level globals, always.
globals()[name.upper()] = value
reload_constraints()
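# Hedged example (not part of the original module) of how a deployment could
# override one of the defaults above. SWIFT_CONF_FILE usually points at
# /etc/swift/swift.conf, though that path is configurable:
#
#   [swift-constraints]
#   max_object_name_length = 512
#
# After editing the file, calling reload_constraints() repopulates
# OVERRIDE_CONSTRAINTS, EFFECTIVE_CONSTRAINTS and the module-level globals,
# e.g. EFFECTIVE_CONSTRAINTS['max_object_name_length'] becomes 512.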
# Maximum slo segments in buffer
MAX_BUFFERED_SLO_SEGMENTS = 10000
#: Query string format= values to their corresponding content-type values
FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',
'xml': 'application/xml'}
def check_metadata(req, target_type):
"""
Check metadata sent in the request headers.
:param req: request object
:param target_type: str: one of: object, container, or account: indicates
which type the target storage for the metadata is
:returns: HTTPBadRequest with bad metadata otherwise None
"""
prefix = 'x-%s-meta-' % target_type.lower()
meta_count = 0
meta_size = 0
for key, value in req.headers.iteritems():
if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
return HTTPBadRequest(body='Header value too long: %s' %
key[:MAX_META_NAME_LENGTH],
request=req, content_type='text/plain')
if not key.lower().startswith(prefix):
continue
key = key[len(prefix):]
if not key:
return HTTPBadRequest(body='Metadata name cannot be empty',
request=req, content_type='text/plain')
meta_count += 1
meta_size += len(key) + len(value)
if len(key) > MAX_META_NAME_LENGTH:
return HTTPBadRequest(
body='Metadata name too long: %s%s' % (prefix, key),
request=req, content_type='text/plain')
elif len(value) > MAX_META_VALUE_LENGTH:
return HTTPBadRequest(
body='Metadata value longer than %d: %s%s' % (
MAX_META_VALUE_LENGTH, prefix, key),
request=req, content_type='text/plain')
elif meta_count > MAX_META_COUNT:
return HTTPBadRequest(
body='Too many metadata items; max %d' % MAX_META_COUNT,
request=req, content_type='text/plain')
elif meta_size > MAX_META_OVERALL_SIZE:
return HTTPBadRequest(
body='Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE,
request=req, content_type='text/plain')
return None
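# Hedged usage sketch for check_metadata() (not part of the original module).
# It assumes swob.Request.blank builds a request the same way the Swift test
# suite does; the path and header below are illustrative only.
def _example_check_metadata():
    from swift.common.swob import Request
    req = Request.blank('/v1/a/c/o',
                        headers={'X-Object-Meta-Color': 'blue'})
    resp = check_metadata(req, 'object')
    # None means the metadata passed validation; otherwise an HTTPBadRequest
    # describing the first violation is returned.
    return resp is None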
def check_object_creation(req, object_name):
"""
Check to ensure that everything is alright about an object to be created.
:param req: HTTP request object
:param object_name: name of object to be created
:returns HTTPRequestEntityTooLarge: the object is too large
:returns HTTPLengthRequired: missing content-length header and not
a chunked request
:returns HTTPBadRequest: missing or bad content-type header, or
bad metadata
"""
if req.content_length and req.content_length > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(body='Your request is too large.',
request=req,
content_type='text/plain')
if req.content_length is None and \
req.headers.get('transfer-encoding') != 'chunked':
return HTTPLengthRequired(request=req)
if 'X-Copy-From' in req.headers and req.content_length:
return HTTPBadRequest(body='Copy requests require a zero byte body',
request=req, content_type='text/plain')
if len(object_name) > MAX_OBJECT_NAME_LENGTH:
return HTTPBadRequest(body='Object name length of %d longer than %d' %
(len(object_name), MAX_OBJECT_NAME_LENGTH),
request=req, content_type='text/plain')
if 'Content-Type' not in req.headers:
return HTTPBadRequest(request=req, content_type='text/plain',
body='No content type')
if not check_utf8(req.headers['Content-Type']):
return HTTPBadRequest(request=req, body='Invalid Content-Type',
content_type='text/plain')
return check_metadata(req, 'object')
def check_mount(root, drive):
"""
Verify that the path to the device is a mount point and mounted. This
allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us from accidentally filling up the root
partition.
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:returns: True if it is a valid mounted device, False otherwise
"""
if not (urllib.quote_plus(drive) == drive):
return False
path = os.path.join(root, drive)
return utils.ismount(path)
def check_float(string):
"""
Helper function for checking if a string can be converted to a float.
:param string: string to be verified as a float
:returns: True if the string can be converted to a float, False otherwise
"""
try:
float(string)
return True
except ValueError:
return False
def valid_timestamp(request):
"""
Helper function to extract a timestamp from requests that require one.
:param request: the swob request object
:returns: a valid Timestamp instance
:raises: HTTPBadRequest on missing or invalid X-Timestamp
"""
try:
return request.timestamp
except exceptions.InvalidTimestamp as e:
raise HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
def check_utf8(string):
"""
Validate if a string is valid UTF-8 str or unicode and that it
does not contain any null character.
:param string: string to be validated
:returns: True if the string is valid utf-8 str or unicode and
contains no null characters, False otherwise
"""
if not string:
return False
try:
if isinstance(string, unicode):
string.encode('utf-8')
else:
string.decode('UTF-8')
return '\x00' not in string
# If string is unicode, decode() will raise UnicodeEncodeError
# So, we should catch both UnicodeDecodeError & UnicodeEncodeError
except UnicodeError:
return False
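# Illustrative calls for check_utf8(); the results follow directly from the
# implementation above (Python 2 str/unicode semantics):
#
#   check_utf8('caf\xc3\xa9')   -> True   (valid UTF-8 byte string)
#   check_utf8(u'caf\xe9')      -> True   (unicode that encodes cleanly)
#   check_utf8('abc\x00def')    -> False  (embedded null character)
#   check_utf8('')              -> False  (empty value)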
def check_copy_from_header(req):
"""
Validate that the value from x-copy-from header is
well formatted. We assume the caller ensures that
x-copy-from header is present in req.headers.
:param req: HTTP request object
:returns: A tuple with container name and object name
:raise: HTTPPreconditionFailed if x-copy-from value
is not well formatted.
"""
src_header = unquote(req.headers.get('X-Copy-From'))
if not src_header.startswith('/'):
src_header = '/' + src_header
try:
return utils.split_path(src_header, 2, 2, True)
except ValueError:
raise HTTPPreconditionFailed(
request=req,
            body='X-Copy-From header must be of the form '
                 '<container name>/<object name>')
| kalrey/swift | swift/common/constraints.py | Python | apache-2.0 | 10,266 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import pyjxslt
from dict_compare import dict_compare
import json
xml1 = """<?xml version="1.0" encoding="UTF-8"?>
<doc>
<entry id='17'>FOO</entry>
<entry id='42'>BAR</entry>
</doc>"""
expected_json = """{
"doc": {
"entry": [
{
"_content": "FOO",
"id": "17"
},
{
"_content": "BAR",
"id": "42"
}
]
}
}"""
bad_xml = """<?xml version="1.0" encoding="UTF-8"?>
<doc>
<entry id='17'>FOO</entry>
<entry id='42'>BAR</entry>
</dod>"""
xml_with_processing_instruction = """<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="./datadict_v2.xsl"?>
<data_table id="pht003897.v1" study_id="phs000722.v1" participant_set="1">
</data_table>"""
expected_pi = '{ "data_table": { "id": "pht003897.v1", "study_id": "phs000722.v1", "participant_set": "1" } }'
expected_bad = 'ERROR: Transformer exception: org.xml.sax.SAXParseException; lineNumber: 5; columnNumber: 3; ' \
'The element type "doc" must be terminated by the matching end-tag "</doc>".'
class XMLToJsonTestCase(unittest.TestCase):
# Just a quick test as the actual transform is tested elsewhere. Our job is just to make sure
# that we get what we expect through the gateway
gw = pyjxslt.Gateway()
if not gw.gateway_connected(reconnect=False):
print("Gateway must be running on port 25333")
def compare_jsons(self, json1, json2):
json1d = json.loads(json1)
try:
json2d = json.loads(json2)
except json.JSONDecodeError as e:
print(str(e))
return False
success, txt = dict_compare(json1d, json2d)
if not success:
print(txt)
return success
def test1(self):
self.assertTrue(self.compare_jsons(expected_json, self.gw.to_json(xml1)))
self.assertEqual(expected_bad, self.gw.to_json(bad_xml))
self.assertTrue(self.compare_jsons(expected_pi, self.gw.to_json(xml_with_processing_instruction)))
class NoGatewayTestCase(unittest.TestCase):
def test_gw_down(self):
gw = pyjxslt.Gateway(port=23456) # a non-existent port
self.assertIsNone(gw.to_json(xml1))
if __name__ == '__main__':
unittest.main()
| cts2/pyjxslt | pyjxslt-python/tests/testXMLtoJSON.py | Python | apache-2.0 | 3,904 | 0.003842 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import os
import sys
from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BooleanField
from fate_arch.federation import FederationEngine
from fate_arch.metastore.base_model import DateTimeField
from fate_arch.common import file_utils, log, EngineType, conf_utils
from fate_arch.common.conf_utils import decrypt_database_config
from fate_arch.metastore.base_model import JSONField, SerializedField, BaseModel
LOGGER = log.getLogger()
DATABASE = decrypt_database_config()
is_standalone = conf_utils.get_base_config("default_engines", {}).get(EngineType.FEDERATION).upper() == \
FederationEngine.STANDALONE
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
key = str(cls) + str(os.getpid())
if key not in instances:
instances[key] = cls(*args, **kw)
return instances[key]
return _singleton
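# Hedged illustration (not part of the original module): the decorator above
# caches one instance per class and process, so repeated constructions return
# the same object. The Counter class is purely illustrative.
#
#   @singleton
#   class Counter(object):
#       def __init__(self):
#           self.n = 0
#
#   a, b = Counter(), Counter()
#   assert a is b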
@singleton
class BaseDataBase(object):
def __init__(self):
database_config = DATABASE.copy()
db_name = database_config.pop("name")
if is_standalone:
from playhouse.apsw_ext import APSWDatabase
self.database_connection = APSWDatabase(file_utils.get_project_base_directory("fate_sqlite.db"))
else:
from playhouse.pool import PooledMySQLDatabase
self.database_connection = PooledMySQLDatabase(db_name, **database_config)
DB = BaseDataBase().database_connection
def close_connection():
try:
if DB:
DB.close()
except Exception as e:
LOGGER.exception(e)
class DataBaseModel(BaseModel):
class Meta:
database = DB
@DB.connection_context()
def init_database_tables():
members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
table_objs = []
create_failed_list = []
for name, obj in members:
if obj != DataBaseModel and issubclass(obj, DataBaseModel):
table_objs.append(obj)
LOGGER.info(f"start create table {obj.__name__}")
try:
obj.create_table()
LOGGER.info(f"create table success: {obj.__name__}")
except Exception as e:
LOGGER.exception(e)
create_failed_list.append(obj.__name__)
if create_failed_list:
LOGGER.info(f"create tables failed: {create_failed_list}")
raise Exception(f"create tables failed: {create_failed_list}")
class StorageConnectorModel(DataBaseModel):
f_name = CharField(max_length=100, primary_key=True)
f_engine = CharField(max_length=100, index=True) # 'MYSQL'
f_connector_info = JSONField()
class Meta:
db_table = "t_storage_connector"
class StorageTableMetaModel(DataBaseModel):
f_name = CharField(max_length=100, index=True)
f_namespace = CharField(max_length=100, index=True)
f_address = JSONField()
f_engine = CharField(max_length=100) # 'EGGROLL', 'MYSQL'
f_store_type = CharField(max_length=50, null=True) # store type
f_options = JSONField()
f_partitions = IntegerField(null=True)
f_id_delimiter = CharField(null=True)
f_in_serialized = BooleanField(default=True)
f_have_head = BooleanField(default=True)
f_extend_sid = BooleanField(default=False)
f_auto_increasing_sid = BooleanField(default=False)
f_schema = SerializedField()
f_count = BigIntegerField(null=True)
f_part_of_data = SerializedField()
f_description = TextField(default='')
f_read_access_time = BigIntegerField(null=True)
f_read_access_date = DateTimeField(null=True)
f_write_access_time = BigIntegerField(null=True)
f_write_access_date = DateTimeField(null=True)
class Meta:
db_table = "t_storage_table_meta"
primary_key = CompositeKey('f_name', 'f_namespace')
class SessionRecord(DataBaseModel):
f_engine_session_id = CharField(max_length=150, null=False)
f_manager_session_id = CharField(max_length=150, null=False)
f_engine_type = CharField(max_length=10, index=True)
f_engine_name = CharField(max_length=50, index=True)
f_engine_address = JSONField()
class Meta:
db_table = "t_session_record"
primary_key = CompositeKey("f_engine_type", "f_engine_name", "f_engine_session_id")
| FederatedAI/FATE | python/fate_arch/metastore/db_models.py | Python | apache-2.0 | 4,898 | 0.001429 |
# -*- coding: utf-8 -*-
"""
Tests.
"""
import unittest
from bruges.rockphysics import fluidsub
# Inputs... GAS case
vp_gas = 2429.0
vs_gas = 1462.4
rho_gas = 2080.
# Expected outputs... BRINE case
vp_brine = 2850.5
vs_brine = 1416.1
rho_brine = 2210.0
phi = 0.275 # Don't know this... reading from fig
rhohc = 250.0 # gas
rhow = 1040.0 # brine
sw = 0.3 # Don't know this... just guessing
swnew = 1.0 # Don't know this... just guessing
khc = 207000000.0 # gas
kw = 2950000000.0 # brine
kclay = 25000000000.0
kqtz = 37000000000.0
vclay = 0.05
kmin = 36266406250.0 # Don't know this... reading from fig
class FluidsubTest(unittest.TestCase):
"""
Tests fluid sub calculations against Smith et al 2003.
https://dl.dropboxusercontent.com/u/14965965/Smith_etal_2003.pdf
"""
def test_avseth(self):
# Base case: gas
# Subbing with: brine
sub = fluidsub.avseth_fluidsub(vp=vp_gas,
vs=vs_gas,
rho=rho_gas,
phi=phi,
rhof1=rhohc,
rhof2=rhow,
kmin=37000000000,
kf1=khc,
kf2=kw)
self.assertAlmostEqual(sub[0], vp_brine, places=-1) # Cannot match :(
self.assertAlmostEqual(sub[1], vs_brine, places=-1) # Cannot match :(
self.assertAlmostEqual(sub[2], rho_brine, places=-1) # Cannot match :(
def test_smith(self):
# Base case: gas
# Subbing with: brine
sub = fluidsub.smith_fluidsub(vp=vp_gas,
vs=vs_gas,
rho=rho_gas,
phi=phi,
rhohc=rhohc,
rhow=rhow,
sw=sw,
swnew=swnew,
khc=khc,
kw=kw,
kclay=kclay,
kqtz=kqtz,
vclay=vclay)
self.assertAlmostEqual(sub[0], vp_brine, places=-1)
self.assertAlmostEqual(sub[1], vs_brine, places=-1)
self.assertAlmostEqual(sub[2], rho_brine, places=-1) # Cannot match :(
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(FluidsubTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| agile-geoscience/agilegeo | bruges/rockphysics/test/fluidsub_test.py | Python | apache-2.0 | 2,698 | 0 |
r"""
Used to configure the main parameters for each implemented model.
.. currentmodule:: compmech.conecyl.modelDB
"""
import numpy as np
from scipy.sparse import coo_matrix
from clpt import *
from fsdt import *
db = {
'clpt_donnell_bc1': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc1,
'linear': clpt_donnell_bc1_linear,
'non-linear': clpt_donnell_bc1_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_donnell_bc2': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc2,
'linear': clpt_donnell_bc2_linear,
'non-linear': clpt_donnell_bc2_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'iso_clpt_donnell_bc2': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc2,
'linear': iso_clpt_donnell_bc2_linear,
'non-linear': iso_clpt_donnell_bc2_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_donnell_bc3': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc3,
'linear': clpt_donnell_bc3_linear,
'non-linear': clpt_donnell_bc3_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'iso_clpt_donnell_bc3': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc3,
'linear': iso_clpt_donnell_bc3_linear,
'non-linear': iso_clpt_donnell_bc3_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_donnell_bc4': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc4,
'linear': clpt_donnell_bc4_linear,
'non-linear': clpt_donnell_bc4_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_donnell_bcn': {
'linear static': True,
'linear buckling': False,
'non-linear static': None,
'commons': clpt_commons_bcn,
'linear': clpt_donnell_bcn_linear,
'non-linear': None,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 8,
},
'clpt_sanders_bc1': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc1,
'linear': clpt_sanders_bc1_linear,
'non-linear': clpt_sanders_bc1_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_sanders_bc2': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc2,
'linear': clpt_sanders_bc2_linear,
'non-linear': clpt_sanders_bc2_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_sanders_bc3': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc3,
'linear': clpt_sanders_bc3_linear,
'non-linear': clpt_sanders_bc3_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_sanders_bc4': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': clpt_commons_bc4,
'linear': clpt_sanders_bc4_linear,
'non-linear': clpt_sanders_bc4_nonlinear,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 3,
'num2': 6,
},
'clpt_geier1997_bc2': {
'linear static': None,
'linear buckling': True,
'non-linear static': None,
'commons': clpt_geier1997_bc2,
'linear': clpt_geier1997_bc2,
'non-linear': None,
'dofs': 3,
'e_num': 6,
'i0': 0,
'j0': 0,
'num0': 0,
'num1': 0,
'num2': 3,
},
'fsdt_donnell_bcn': {
'linear static': True,
'linear buckling': False,
'non-linear static': True,
'commons': fsdt_commons_bcn,
'linear': fsdt_donnell_bcn_linear,
'non-linear': fsdt_donnell_bcn_nonlinear,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 5,
'num2': 10,
},
'fsdt_donnell_bc1': {
'linear static': True,
'linear buckling': True,
'non-linear static': True,
'commons': fsdt_commons_bc1,
'linear': fsdt_donnell_bc1_linear,
'non-linear': fsdt_donnell_bc1_nonlinear,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 5,
'num2': 10,
},
'fsdt_donnell_bc2': {
'linear static': True,
'linear buckling': True,
'non-linear static': False,
'commons': fsdt_commons_bc2,
'linear': fsdt_donnell_bc2_linear,
'non-linear': fsdt_donnell_bc2_nonlinear,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 5,
'num2': 10,
},
'fsdt_donnell_bc3': {
'linear static': True,
'linear buckling': True,
'non-linear static': False,
'commons': fsdt_commons_bc3,
'linear': fsdt_donnell_bc3_linear,
'non-linear': fsdt_donnell_bc3_nonlinear,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 5,
'num2': 10,
},
'fsdt_donnell_bc4': {
'linear static': True,
'linear buckling': True,
'non-linear static': False,
'commons': fsdt_commons_bc4,
'linear': fsdt_donnell_bc4_linear,
'non-linear': fsdt_donnell_bc4_nonlinear,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 5,
'num2': 10,
},
'fsdt_sanders_bcn': {
'linear static': True,
'linear buckling': False,
'non-linear static': False,
'commons': fsdt_commons_bcn,
'linear': fsdt_sanders_bcn_linear,
'non-linear': None,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 1,
'num0': 3,
'num1': 5,
'num2': 10,
},
'fsdt_shadmehri2012_bc2': {
'linear static': None,
'linear buckling': True,
'non-linear static': None,
'commons': fsdt_shadmehri2012_bc2,
'linear': fsdt_shadmehri2012_bc2,
'non-linear': None,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 0,
'num0': 0,
'num1': 0,
'num2': 5,
},
'fsdt_shadmehri2012_bc3': {
'linear static': None,
'linear buckling': True,
'non-linear static': None,
'commons': fsdt_shadmehri2012_bc3,
'linear': fsdt_shadmehri2012_bc3,
'non-linear': None,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 0,
'num0': 0,
'num1': 0,
'num2': 5,
},
'fsdt_geier1997_bc2': {
'linear static': None,
'linear buckling': True,
'non-linear static': None,
'commons': fsdt_geier1997_bc2,
'linear': fsdt_geier1997_bc2,
'non-linear': None,
'dofs': 5,
'e_num': 8,
'i0': 0,
'j0': 0,
'num0': 0,
'num1': 0,
'num2': 5,
},
}
def get_linear_matrices(cc, combined_load_case=None):
r"""Obtain the right functions to calculate hte linear matrices
for a given model.
The ``model`` parameter of the ``ConeCyl`` object is used to search
for the functions ``fG0``, ``fG0_cyl``, ``fkG0``, ``fkG0_cyl``,
and the matrix ``k0edges`` is calculated, when applicable.
Parameters
----------
cc : compmech.conecyl.ConeCyl
The ``ConeCyl`` object.
combined_load_case : int, optional
As explained in the :meth:`ConeCyl.lb() <compmech.conecyl.ConeCyl.lb>`
method, the integer indicating
which combined load case should be used. Default is ``None``.
Returns
-------
out : tuple
A tuple containing ``(fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges)``.
"""
r1 = cc.r1
r2 = cc.r2
L = cc.L
m1 = cc.m1
m2 = cc.m2
n2 = cc.n2
model = cc.model
try:
if 'iso_' in model:
fk0edges = db[model[4:]]['linear'].fk0edges
else:
fk0edges = db[model]['linear'].fk0edges
except AttributeError:
k0edges = None
if model == 'clpt_donnell_bc1':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_donnell_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
elif model == 'iso_clpt_donnell_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_donnell_bc3':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'iso_clpt_donnell_bc3':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_donnell_bc4':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_donnell_bcn':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kvBot, cc.kvTop,
cc.kwBot, cc.kwTop,
cc.kphixBot, cc.kphixTop,
cc.kphitBot, cc.kphitTop)
elif model == 'clpt_sanders_bc1':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_sanders_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_sanders_bc3':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_sanders_bc4':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'clpt_geier1997_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2, L,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_donnell_bc1':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_donnell_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_donnell_bc3':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_donnell_bc4':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kuBot, cc.kuTop,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_donnell_bcn':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kuBot, cc.kuTop,
cc.kvBot, cc.kvTop,
cc.kwBot, cc.kwTop,
cc.kphixBot, cc.kphixTop,
cc.kphitBot, cc.kphitTop)
elif model == 'fsdt_sanders_bcn':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kuBot, cc.kuTop,
cc.kvBot, cc.kvTop,
cc.kwBot, cc.kwTop,
cc.kphixBot, cc.kphixTop,
cc.kphitBot, cc.kphitTop)
elif model == 'fsdt_shadmehri2012_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_shadmehri2012_bc3':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kvBot, cc.kvTop,
cc.kphixBot, cc.kphixTop)
elif model == 'fsdt_geier1997_bc2':
k0edges = fk0edges(m1, m2, n2, r1, r2,
cc.kuBot, cc.kuTop,
cc.kphixBot, cc.kphixTop)
fk0 = db[model]['linear'].fk0
fk0_cyl = db[model]['linear'].fk0_cyl
if 'iso_' in model:
fkG0 = db[model[4:]]['linear'].fkG0
fkG0_cyl = db[model[4:]]['linear'].fkG0_cyl
else:
fkG0 = db[model]['linear'].fkG0
fkG0_cyl = db[model]['linear'].fkG0_cyl
return fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges
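# Hedged usage sketch (not part of the original module): `cc` is assumed to be
# a fully configured compmech.conecyl.ConeCyl instance.
def _example_get_linear_matrices(cc):
    fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges = get_linear_matrices(cc)
    # The first four entries are the model-specific matrix routines taken from
    # the db dictionary; k0edges is the assembled elastic-edge stiffness
    # matrix, or None when the chosen model defines no fk0edges.
    return k0edges is not None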
valid_models = sorted(db.keys())
def get_model(model_name):
if not model_name in valid_models:
raise ValueError('ERROR - valid models are:\n ' +
'\n '.join(valid_models))
else:
return db[model_name]
| saullocastro/compmech | compmech/conecyl/modelDB.py | Python | bsd-3-clause | 17,932 | 0.001617 |
from __future__ import division
from math import *
def calculate(expr_string):
math_list = ['math', 'acos', 'asin', 'atan', 'atan2', 'ceil',
'cos', 'cosh', 'degrees', 'e', 'exp', 'fabs', 'floor', 'fmod',
'frexp', 'hypot', 'ldexp', 'log', 'log10', 'modf', 'pi',
'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh']
builtins_list = [abs]
local_ctx = dict([ (k, globals().get(k, None)) for k in math_list ])
local_ctx.update(dict([ (b.__name__, b) for b in builtins_list ]))
try:
return eval(expr_string, { "__builtins__": None }, local_ctx)
except (SyntaxError, TypeError, NameError):
return None
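# A few illustrative calls for calculate(); only names exposed through the
# whitelists above are reachable from inside the evaluated expression:
#
#   calculate('2 + 2')             -> 4
#   calculate('sqrt(2) * pi')      -> 4.442882938158366
#   calculate('__import__("os")')  -> None  (NameError: builtins are disabled)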
| vtbassmatt/django-expression-fields | src/expression_fields/expr.py | Python | mit | 668 | 0.01497 |
from django.core.exceptions import ValidationError
from django.test import TestCase
from api.test.models import _get_string_data_object
from api.models.data_nodes import *
class TestDataNode(TestCase):
INPUT_DATA=(
([(0,3),(0,1)], 'i'),
([(1,3),(0,2)], 'a'),
([(1,3),(1,2)], 'm'),
([(2,3),(0,5)], 'r'),
([(2,3),(1,5)], 'o'),
([(2,3),(2,5)], 'b'),
([(2,3),(3,5)], 'o'),
([(2,3),(4,5)], 't'),
)
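    # Each data_path above is a list of (index, degree) pairs, one pair per
    # tree level: e.g. [(2,3),(4,5)] addresses child 2 of the degree-3 root,
    # then leaf 4 of that degree-5 branch, spelling out
    # [['i'],['a','m'],['r','o','b','o','t']] overall.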
def getTree(self, data):
root = DataNode.objects.create(type='string')
self.addData(root, data)
return root
def addData(self, root, data):
for data_path, value in data:
data_object = _get_string_data_object(value)
root.add_data_object(data_path, data_object)
def testAddDataObject(self):
root = self.getTree(self.INPUT_DATA)
# spot check [['i'],['a','m'],['r','o','b','o','t']]
self.assertEqual(root.get_data_object([(0,3),(0,1)]).substitution_value, 'i')
self.assertEqual(root.get_data_object([(1,3),(0,2)]).substitution_value, 'a')
self.assertEqual(root.get_data_object([(1,3),(1,2)]).substitution_value, 'm')
self.assertEqual(root.get_data_object([(2,3),(4,5)]).substitution_value, 't')
# Verify that we get the same result after saving
self.assertTrue(root.get_children()[0].id is None)
root.save_with_children()
self.assertEqual(root.get_data_object([(0,3),(0,1)]).substitution_value, 'i')
self.assertEqual(root.get_data_object([(1,3),(0,2)]).substitution_value, 'a')
self.assertEqual(root.get_data_object([(1,3),(1,2)]).substitution_value, 'm')
self.assertEqual(root.get_data_object([(2,3),(4,5)]).substitution_value, 't')
self.assertTrue(root.get_children()[0].id is not None)
def testMissingData(self):
input_data=(
([(0,3),(0,1)], 'i'),
#([(1,3),(0,2)], 'a'),
#([(1,3),(1,2)], 'm'),
([(2,3),(0,5)], 'r'),
#([(2,3),(1,5)], 'o'),
([(2,3),(2,5)], 'b'),
([(2,3),(3,5)], 'o'),
([(2,3),(4,5)], 't'),
)
root = self.getTree(input_data)
# spot check [['i'],['a','m'],['r','o','b','o','t']]
self.assertEqual(root.get_data_object([(0,3),(0,1)]).substitution_value, 'i')
with self.assertRaises(MissingBranchError):
root.get_data_object([(1,3),])
with self.assertRaises(MissingBranchError):
root.get_data_object([(2,3),(1,5)])
self.assertEqual(root.get_data_object([(2,3),(4,5)]).substitution_value, 't')
def testAddScalarDataObject(self):
root = DataNode.objects.create(type='string')
text = 'text'
data_object = _get_string_data_object(text)
data_path = []
root.add_data_object(data_path, data_object)
self.assertEqual(root.get_data_object(data_path).substitution_value, text)
def testAddScalarDataObjectTwice(self):
root = DataNode.objects.create(type='string')
text = 'text'
data_object = _get_string_data_object(text)
data_path = []
root.add_data_object(data_path, data_object)
with self.assertRaises(DataAlreadyExistsError):
root.add_data_object(data_path, data_object)
def testAddBranchTwice(self):
root = DataNode.objects.create(degree=2, type='string')
branch1 = root.add_branch(1, 1)
branch2 = root.add_branch(1, 1)
self.assertEqual(branch1.id, branch2.id)
def testAddBranchOverLeaf(self):
root = DataNode.objects.create(degree=2, type='string')
data_object = _get_string_data_object('text')
root.add_leaf(1, data_object)
with self.assertRaises(UnexpectedLeafNodeError):
root.add_branch(1, 1)
def testAddLeafOverBranch(self):
root = DataNode.objects.create(degree=2, type='string')
data_object = _get_string_data_object('text')
root.add_leaf(1, data_object)
with self.assertRaises(UnexpectedLeafNodeError):
root.add_branch(1, 1)
def testAddLeafTwice(self):
root = DataNode.objects.create(degree=1, type='string')
data_object = _get_string_data_object('text')
root.add_leaf(0, data_object)
with self.assertRaises(NodeAlreadyExistsError):
root.add_leaf(0, data_object)
def testIndexOutOfRangeError(self):
degree = 2
data_object = _get_string_data_object('text')
root = DataNode.objects.create(degree=degree, type='string')
with self.assertRaises(IndexOutOfRangeError):
root.add_leaf(degree, data_object)
with self.assertRaises(IndexOutOfRangeError):
root.add_leaf(-1, data_object)
def testDegreeMismatchError(self):
data_object = _get_string_data_object('text')
root = DataNode.objects.create(degree=2, type='string')
root.add_branch(1, 2)
with self.assertRaises(DegreeMismatchError):
root.add_branch(1, 3)
def testUnknownDegreeError(self):
data_object = _get_string_data_object('text')
root = DataNode.objects.create(type='string')
with self.assertRaises(UnknownDegreeError):
root.add_leaf(0, data_object)
def testIsReady(self):
some_of_the_data=(
([(0,3),(0,1)], 'i'),
([(1,3),(0,2)], 'a'),
([(2,3),(0,5)], 'r'),
([(2,3),(1,5)], 'o'),
([(2,3),(2,5)], 'b'),
([(2,3),(4,5)], 't'),
)
the_rest_of_the_data = (
([(2,3),(3,5)], 'o'),
([(1,3),(1,2)], 'm'),
)
root = self.getTree(some_of_the_data)
self.assertFalse(root.is_ready([]))
self.assertFalse(root.is_ready([(2,3),]))
self.assertFalse(root.is_ready([(2,3),(3,5)]))
self.assertTrue(root.is_ready([(0,3),]))
self.assertTrue(root.is_ready([(0,3),(0,1)]))
self.addData(root, the_rest_of_the_data)
self.assertTrue(root.is_ready([]))
self.assertTrue(root.is_ready([(2,3),]))
self.assertTrue(root.is_ready([(2,3),(3,5)]))
self.assertTrue(root.is_ready([(0,3),]))
self.assertTrue(root.is_ready([(0,3),(0,1)]))
def testClone(self):
tree1 = self.getTree(self.INPUT_DATA)
child1 = tree1.get_node([(2,3)])
grandchild1 = tree1.get_node([(2,3),(4,5)])
tree2 = tree1.clone()
child2 = tree2.get_node([(2,3)])
grandchild2 = tree2.get_node([(2,3),(4,5)])
self.assertEqual(grandchild1.data_object.uuid, grandchild2.data_object.uuid)
self.assertNotEqual(tree1.uuid, tree2.uuid)
self.assertNotEqual(child1.uuid, child2.uuid)
self.assertNotEqual(grandchild1.uuid, grandchild2.uuid)
def testClone_withSeed(self):
tree1 = self.getTree(self.INPUT_DATA)
child1 = tree1.get_node([(2,3)])
grandchild1 = tree1.get_node([(2,3),(4,5)])
tree2 = DataNode.objects.create(type='string')
tree1.clone(seed=tree2)
child2 = tree2.get_node([(2,3)])
grandchild2 = tree2.get_node([(2,3),(4,5)])
self.assertEqual(grandchild1.data_object.uuid, grandchild2.data_object.uuid)
self.assertNotEqual(tree1.uuid, tree2.uuid)
self.assertNotEqual(child1.uuid, child2.uuid)
self.assertNotEqual(grandchild1.uuid, grandchild2.uuid)
def testClone_leaf(self):
leaf = DataNode.objects.create(type='string')
leaf.add_data_object(
[], _get_string_data_object(
'al ultimo se lo estan comiendo las hormigas'))
clone = leaf.clone()
self.assertNotEqual(leaf.uuid, clone.uuid)
self.assertEqual(leaf.data_object.uuid, clone.data_object.uuid)
def testFlattenedClone(self):
tree1 = self.getTree(self.INPUT_DATA)
penult_grandchild1 = tree1.get_node([(2,3),(3,5)])
last_grandchild1 = tree1.get_node([(2,3),(4,5)])
tree2 = tree1.flattened_clone()
penult_child2 = tree2.get_node([(6,8)])
last_child2 = tree2.get_node([(7,8)])
self.assertEqual(penult_grandchild1.data_object.uuid,
penult_child2.data_object.uuid)
self.assertEqual(last_grandchild1.data_object.uuid,
last_child2.data_object.uuid)
self.assertNotEqual(tree1.uuid, tree2.uuid)
self.assertNotEqual(penult_grandchild1.uuid, penult_child2.uuid)
self.assertNotEqual(last_grandchild1.uuid, last_child2.uuid)
def testFlattenedClone_leaf(self):
leaf = DataNode.objects.create(type='string')
leaf.add_data_object(
[], _get_string_data_object(
'al ultimo se lo estan comiendo las hormigas'))
clone = leaf.flattened_clone()
self.assertNotEqual(leaf.uuid, clone.uuid)
self.assertEqual(leaf.data_object.uuid, clone.data_object.uuid)
def testGetOrCreateNode_existing(self):
tree = self.getTree(self.INPUT_DATA)
# If node exists, return it.
old_node = tree.get_node([(2,3),(3,5)])
node = tree.get_or_create_node([(2,3),(3,5)])
self.assertEqual(old_node.uuid, node.uuid)
def testGetOrCreateNode_created(self):
tree = DataNode.objects.create(type='string')
# If node does not exist, create a path to it.
node = tree.get_or_create_node([(2,3),(3,5)])
new_node = tree.get_node([(2,3),(3,5)])
self.assertEqual(new_node.uuid, node.uuid)
def testCalculateContentsFingerprint(self):
node = self.getTree(self.INPUT_DATA)
self.assertEqual(
node.calculate_contents_fingerprint(),
'd7405829b255d1dd4af90780a4b20286')
def testCalculateContentsFingerprintOrderMatters(self):
swapped_order_input_data=(
([(0,3),(0,1)], 'i'),
([(1,3),(0,2)], 'a'),
([(1,3),(1,2)], 'm'),
([(2,3),(0,5)], 'r'),
([(2,3),(1,5)], 'o'),
([(2,3),(2,5)], 'b'),
([(2,3),(3,5)], 't'), # order swapped
([(2,3),(4,5)], 'o'), # order swapped
)
node1 = self.getTree(self.INPUT_DATA)
node2 = self.getTree(swapped_order_input_data)
self.assertNotEqual(
node1.calculate_contents_fingerprint(),
node2.calculate_contents_fingerprint())
| StanfordBioinformatics/loom | server/loomengine_server/api/test/models/test_data_nodes.py | Python | agpl-3.0 | 10,494 | 0.019726 |
from rest_framework.decorators import api_view, parser_classes
from rest_framework.parsers import JSONParser, FormParser, MultiPartParser
from rest_framework.response import Response
from .serializers import *
import json
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from django.contrib.auth.models import User
from workspace.models import Project, Theme
from copy import deepcopy
import markdown
import base64
import mimetypes
import yaml
from jinja2 import Template, Environment, meta
import traceback
import re
import os
from glide import *
from django.conf import settings
import pathlib, shutil, subprocess
def _isBinary(fileName):
fileType, encoding = mimetypes.guess_type(fileName)
if fileType.startswith('text/')\
or fileType == 'application/json'\
or fileType == 'application/x-latex'\
or fileType == 'application/javascript'\
or fileType == 'application/yaml'\
or fileName.endswith('.md'): # Just in case
return False
else:
return True
# @api_view(['GET'])
# def theme(request, slug):
# """
# Responds with a list of all the themes available
# or a theme when specified
# """
# themes = Theme.objects.all()
# if slug:
# themes = themes.filter(slug=slug)
# serializer = ThemeSerializer(themes, many=True)
# return Response(serializer.data)
# @api_view(['GET'])
# def project(request, slug):
# """
# Responds with a project object specified
# """
# projects = Project.objects.all()
# if slug:
# projects = projects.filter(slug=slug)
# serializer = ProjectSerializer(projects, many=True)
# return Response(serializer.data)
@api_view(['GET'])
def repositories(request):
"""
Responds with a list of repositories
that are accessible to the authenticated user
"""
accessToken = request.session['accessToken']
getAllReposUrl = 'https://api.github.com/user/repos?&per_page=100&access_token={}'
getAllReposUrl = getAllReposUrl.format(accessToken)
getAllReposUrl = getAuthUrl(getAllReposUrl)
with urlopen(getAllReposUrl) as allReposRes:
resStr = allReposRes.read().decode('utf-8')
return Response({ 'repositories': resStr })
@api_view(['GET'])
def readme(request, owner, repo):
"""
    Responds with the HTML-rendered README.md of a repository
    that is accessible to the authenticated user
"""
accessToken = request.session['accessToken']
getReadmeUrl = 'https://api.github.com/repos/{}/{}/readme?access_token={}'
getReadmeUrl = getReadmeUrl.format(owner, repo, accessToken)
getReadmeUrl = getAuthUrl(getReadmeUrl)
# with urlopen(getReadmeUrl) as readmeRes:
# resStr = readmeRes.read().decode('utf-8')
# return Response({ 'branches': resStr })
req = Request(
url=getReadmeUrl, method='GET',
headers={'Content-Type': 'application/vnd.github.v3.html+json'})
try:
with urlopen(req) as readmeRes:
resStr = readmeRes.read().decode('utf-8')
readmeObj = json.loads(resStr)
mdContent = readmeObj['content']
if readmeObj['encoding'] == 'base64':
mdContent = base64.b64decode(mdContent).decode('utf-8')
res = _mdToHtml(mdContent)
return Response({
'readme': res
})
else:
return Response({
'error': 'decoding'
})
except HTTPError:
return Response({
'error': 'HTTPError'
})
@api_view(['POST'])
def cdn(request, owner, repo):
"""
Responds with RawGit url for the specified file
"""
res = {}
accessToken = request.session['accessToken']
file = request.data['file']
# branch = request.data['branch']
commit = _getLatestCommit(accessToken, owner, repo)
cdnUrl = 'https://cdn.rawgit.com/{}/{}/{}/{}'
cdnUrl = cdnUrl.format(owner, repo, commit['sha'], file['path'])
return Response({
'cdnUrl': cdnUrl
})
@api_view(['POST'])
def parse(request):
template = request.data['templateFileContent']
jinjaEnv = Environment()
absSynTree = jinjaEnv.parse(template)
keys = list(meta.find_undeclared_variables(absSynTree))
# TODO: Sort it properly:
# Allow whitespaces after/before the curly braces
keys = sorted(keys, key=lambda x:template.index('{{'+x+'}}'))
return Response({
'keys': keys
})
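# Hedged illustration of the Jinja2 introspection used in parse() above:
#
#   jinja_env = Environment()
#   ast = jinja_env.parse('Hello {{name}}, welcome to {{site}}')
#   meta.find_undeclared_variables(ast)   # -> {'name', 'site'} (unordered set)
#
# Because the result is a set, parse() re-sorts the keys by first occurrence
# in the raw template; that sort currently assumes the exact token form
# '{{key}}' with no surrounding whitespace (see the TODO above).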
@api_view(['GET'])
def branches(request, repositoryFullName):
"""
Responds with a list of branches in the specified project
"""
accessToken = request.session['accessToken']
getBranchesUrl = 'https://api.github.com/repos/{}/branches?&per_page=100&access_token={}'
getBranchesUrl = getBranchesUrl.format(repositoryFullName, accessToken)
getBranchesUrl = getAuthUrl(getBranchesUrl)
with urlopen(getBranchesUrl) as branchesRes:
resStr = branchesRes.read().decode('utf-8')
res = json.loads(resStr)
return Response({ 'branches': res })
@api_view(['GET'])
def commits(request, owner, repo, branch):
"""
Responds with a list of commits on the specified branch
in the specified repository
"""
accessToken = request.session['accessToken']
getCommitsUrl = 'https://api.github.com/repos/{}/{}/commits?sha={}&access_token={}'
getCommitsUrl = getCommitsUrl.format(owner, repo, branch, accessToken)
getCommitsUrl = getAuthUrl(getCommitsUrl)
with urlopen(getCommitsUrl) as commitsRes:
resStr = commitsRes.read().decode('utf-8')
res = json.loads(resStr)
return Response({ 'commits': res })
def _getLatestCommit(accessToken, repoUsername, projectSlug):
"""
Returns the latest commit object of a repository
"""
commitsUrl = 'https://api.github.com/repos/{}/{}/commits?access_token={}'
commitsUrl = commitsUrl.format(repoUsername, projectSlug, accessToken)
commitsUrl = getAuthUrl(commitsUrl)
with urlopen(commitsUrl) as commitsRes:
res = commitsRes.read().decode('utf-8')
commits = json.loads(res)
# commits[0] is guaranteed
# as every Glide repo has been created with the option 'auto_init': True
return commits[0]
def _getLatestSha(accessToken, repoUsername, projectSlug):
"""
Returns the hash value of the latest commit of a repository
"""
latestCommit = _getLatestCommit(accessToken, repoUsername, projectSlug)
return latestCommit['sha']
def _getRepoTree(accessToken, repoUsername, projectSlug, branch='master', commit=None):
"""
Returns the latest tree structure of a repository.
The branch can be specified. Otherwise, it assumes master.
The commit SHA can be specified. Otherwise, it assumes latest commit.
"""
sha = ''
if not commit:
sha = _getLatestSha(accessToken, repoUsername, projectSlug)
else:
sha = commit
repoTreeUrl = 'https://api.github.com/repos/{}/{}/git/trees/{}?recursive=1?access_token={}'
repoTreeUrl = repoTreeUrl.format(repoUsername, projectSlug, sha, accessToken)
repoTreeUrl = getAuthUrl(repoTreeUrl)
with urlopen(repoTreeUrl) as repoTreeRes:
# TODO: This API request sometimes gives 409 conflicts response. # Why?
res = repoTreeRes.read().decode('utf-8')
repoTree = json.loads(res)
for file in repoTree['tree']:
#
# TODO: File extension?
#
# file['ext'] = file['path'].split('.')[-1]
# if file['path'] == file['ext']:
# # It's a folder
# file['ext'] = None
# # file['downloadUrl'] = None
#
# TODO: Editor type?
#
# file['editor'] = ''
# if file['ext'] in ['glide', 'md', 'yml', 'yaml']:
# file['editor'] = 'data'
# elif file['ext'] in ['html', 'htm']:
# file['editor'] = 'html'
# elif file['ext'] in ['css']:
# file['editor'] = 'css'
#
file['name'] = file['path'].split('/')[-1]
# TODO: Use GitHub Blobs API rather than custom string operations
# downloadUrl = 'https://raw.githubusercontent.com/{}/{}/{}/{}?access_token={}'
# file['downloadUrl'] = downloadUrl.format(repoUsername, projectSlug, branch, file['path'], accessToken)
# repoTree['tree'] = [file for file in repoTree['tree'] if file['type'] != 'tree']
return repoTree
def _createReference(accessToken, owner, repo, ref, refTo):
#
# In case of creating a new branch,
# ref is the new branch name
# and refTo is sha of a commit you branch from.
#
createRefUrl = 'https://api.github.com/repos/{}/{}/git/refs?access_token={}'.format(owner, repo, accessToken)
createRefUrl = getAuthUrl(createRefUrl)
createRefData = {
'ref': 'refs/heads/' + ref,
'sha': refTo
}
createRefData = json.dumps(createRefData).encode('utf-8')
with urlopen(createRefUrl, createRefData) as createRefRes:
resStr = createRefRes.read().decode('utf-8')
return json.loads(resStr)
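# Hedged usage sketch for _createReference(): branching 'feature-x' off an
# existing commit. The sha below is illustrative only. This maps to GitHub's
# "create a reference" endpoint (POST /repos/:owner/:repo/git/refs) with
# ref='refs/heads/feature-x'.
#
#   _createReference(accessToken, 'octocat', 'hello-world', 'feature-x',
#                    'aa218f56b14c9653891f9e74264a383fa43fefbd')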
def _updateReference(accessToken, owner, repo, ref, refTo):
updateRefUrl = 'https://api.github.com/repos/{}/{}/git/refs/{}?access_token={}'
updateRefUrl = updateRefUrl.format(owner, repo, ref, accessToken)
updateRefUrl = getAuthUrl(updateRefUrl)
updateRefData = {
'sha': refTo
}
updateRefData = json.dumps(updateRefData).encode('utf-8')
req = Request(
url=updateRefUrl, headers={'Content-Type': 'application/json'},
data=updateRefData, method='PATCH')
with urlopen(req) as createFileRes:
resStr = updateRefRes.read().decode('utf-8')
return json.loads(resStr)
# updateRefUrl = 'https://api.github.com/repos/{}/{}/git/refs/{}?access_token={}'
# updateRefUrl = updateRefUrl.format(owner, repo, ref, accessToken)
# updateRefUrl = getAuthUrl(updateRefUrl)
# updateRefData = {
# 'sha': refTo
# }
# updateRefData = json.dumps(updateRefData).encode('utf-8')
# with urlopen(updateRefUrl, updateRefData) as updateRefRes:
# resStr = updateRefRes.read().decode('utf-8')
# return json.loads(resStr)
def _mdToHtml(md):
return markdown.markdown(md)
@api_view(['POST'])
def clone(request):
"""
    Clone a project from the specified GitHub repository.
    GETs the repository's metadata from GitHub using the session's access
    token and returns it to the client.
"""
accessToken = request.session['accessToken']
# Starting from Django 1.5,
# request.POST object does not contain non-form data anymore (e.g., AJAX).
# It is now in request.data object if using DRF.
repoUrl = request.data['repoUrl']
owner = request.data['owner']
repo = request.data['repo']
getRepoUrl = 'https://api.github.com/repos/{}/{}?access_token={}'.format(owner, repo, accessToken)
getRepoUrl = getAuthUrl(getRepoUrl)
with urlopen(getRepoUrl) as reposRes:
resStr = reposRes.read().decode('utf-8')
return Response({ 'repository': resStr })
@api_view(['GET', 'POST'])
def branch(request, owner=None, repo=None, branch=None):
"""
GETs a branch from GitHub
POSTs a reference on GitHub repo as a new branch
"""
if request.method == 'GET':
accessToken = request.session['accessToken']
# owner = request.data['owner']
# repo = request.data['repo']
# branch = request.data['branch']
getBranchUrl = 'https://api.github.com/repos/{}/{}/branches/{}?access_token={}'
getBranchUrl = getBranchUrl.format(owner, repo, branch, accessToken)
getBranchUrl = getAuthUrl(getBranchUrl)
with urlopen(getBranchUrl) as branchRes:
resStr = branchRes.read().decode('utf-8')
return Response({ 'branch': json.loads(resStr) })
elif request.method == 'POST':
accessToken = request.session['accessToken']
newBranchName = request.data['newBranchName']
shaBranchFrom = request.data['shaBranchFrom']
owner = request.data['owner']
repo = request.data['repo']
createRefRes = _createReference(accessToken, owner, repo, newBranchName, shaBranchFrom)
return Response({
'createRefRes': createRefRes
# 'code': createRefRes.getcode()
})
@api_view(['POST'])
def commit(request):
repositoryFullName = request.data['repository']
tree = request.data['tree']
branch = 'heads/{}'.format(request.data['branch'])
commit = request.data['commit']
message = request.data['message']
accessToken = request.session['accessToken']
# POST the tree
createTreeUrl = 'https://api.github.com/repos/{}/git/trees?access_token={}'
createTreeUrl = createTreeUrl.format(repositoryFullName, accessToken)
createTreeUrl = getAuthUrl(createTreeUrl)
createTreeData = {
'tree': tree
}
createTreeData = json.dumps(createTreeData).encode('utf-8')
with urlopen(createTreeUrl, createTreeData) as createTreeRes:
resStr = createTreeRes.read().decode('utf-8')
res = json.loads(resStr)
treeSha = res['sha']
# Create a commit that points the tree
createCommitUrl = 'https://api.github.com/repos/{}/git/commits?access_token={}'
createCommitUrl = createCommitUrl.format(repositoryFullName, accessToken)
createCommitUrl = getAuthUrl(createCommitUrl)
createCommitData = {
'message': message,
'tree': treeSha,
'parents': [commit] # Merge commits have 2+ parent commits
}
createCommitData = json.dumps(createCommitData).encode('utf-8')
with urlopen(createCommitUrl, createCommitData) as createCommitRes:
resStr = createCommitRes.read().decode('utf-8')
newCommit = json.loads(resStr)
newCommitSha = newCommit['sha']
updateRefUrl = 'https://api.github.com/repos/{}/git/refs/{}?access_token={}'
updateRefUrl = updateRefUrl.format(repositoryFullName, branch, accessToken)
updateRefUrl = getAuthUrl(updateRefUrl)
updateRefData = {
'sha': newCommitSha
}
updateRefData = json.dumps(updateRefData).encode('utf-8')
req = Request(
url=updateRefUrl, headers={'Content-Type': 'application/json'},
data=updateRefData, method='PATCH')
with urlopen(req) as updateRefRes:
resStr = updateRefRes.read().decode('utf-8')
res = json.loads(resStr)
return Response({
'createTreeRes': treeSha,
'createCommitRes': newCommit,
'updateRefRes': res
})
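# For reference, the three GitHub Git Data calls made by commit() above, in
# order (standard v3 REST endpoints; parameter names as used in the code):
#
#   1. POST  /repos/:owner/:repo/git/trees            -> new tree sha
#   2. POST  /repos/:owner/:repo/git/commits          -> new commit sha
#        (message, tree=<tree sha>, parents=[<current head commit>])
#   3. PATCH /repos/:owner/:repo/git/refs/heads/:branch with sha=<new commit>
#        to advance the branch pointer to the freshly created commit.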
# @api_view(['POST'])
# def createProject(request):
# """
# Create a new instance of project.
# GETs existing repositories on GitHub to check potential name duplicates.
# POSTs a new repository on GitHub and create a project instance on Glide server.
# """
# username = request.session['username']
# repoUsername = username.split('@')[0]
# user = User.objects.filter(username=username)[0]
# accessToken = request.session['accessToken']
# # Starting from Django 1.5,
# # request.POST object does not contain non-form data anymore (e.g., AJAX).
# # It is now in request.data object if using DRF.
# # Also, consider using
# # 'application/x-www-form-urlencoded'
# # for contentType of AJAX data (on client side)
# # rather than 'application/json; charset=utf-8',
# # if something goes wrong.
# title = request.data['title']
# slug = request.data['slug']
# description = request.data['description']
# repoUrl = request.data['repoUrl']
# theme = request.data['theme']
# getAllReposUrl = 'https://api.github.com/user/repos?access_token={}'.format(accessToken)
# getAllReposUrl = getAuthUrl(getAllReposUrl)
# with urlopen(getAllReposUrl) as allReposRes:
# resStr = allReposRes.read().decode('utf-8')
# allReposJson = json.loads(resStr)
# repoNames = [repo['name'] for repo in allReposJson]
# # Check if the repo exists on repoProvider
# if not slug in repoNames:
# # Check if the project exists on Glide server
# if not Project.objects.filter(owner=username, slug=slug):
# # You may create a new repo on repoProvider
# createRepoUrl = 'https://api.github.com/user/repos?access_token={}'.format(accessToken)
# createRepoData = {'name': slug, 'description': description, 'auto_init': True}
# createRepoData = json.dumps(createRepoData).encode('utf-8')
# createRepoUrl = getAuthUrl(createRepoUrl)
# with urlopen(createRepoUrl, createRepoData) as createRepoRes:
# resStr = createRepoRes.read().decode('utf-8')
# # (optional) TODO: Match the model structure with repo data structure?
# theme = Theme.objects.get(slug=theme)
# project = Project.objects.create(
# owner=user,
# title=title,
# slug=slug,
# description=description,
# repoUrl=repoUrl,
# theme=theme
# )
# projects = Project.objects.all()
# #
# # Load theme files and make a commit to start from
# #
# themeTree = _getRepoTree(accessToken, theme.author, theme.slug)
# for file in themeTree['tree']:
# if file['type'] == 'tree' or not file['downloadUrl']:
# continue
# newFile = {}
# newFile['path'] = 'theme/' + file['path']
# with urlopen(file['downloadUrl']) as fileContentRes:
# newFile['originalContent'] = getBase64Bytes(fileContentRes.read())#.decode('utf-8')
# _createFile(accessToken, repoUsername, slug, newFile)
# return Response({
# 'project': ProjectSerializer(project, many=False).data,
# 'projects': ProjectSerializer(projects, many=True).data
# })
# # Error! The project title is being used.
# return Response({'error': 'The project title is being used. Try with a different title.'})
@api_view(['GET', 'POST'])
def tree(request, owner, repo, branch, commit):
"""
    Return the tree structure of a repository.
    The URL parameters specify the owner, repo, branch, and commit.
    GETs the tree (file structure in the GitHub repo) data for that commit.
    Actual content requests and delivery happen on the client side (AJAX).
    """
accessToken = request.session['accessToken']
if request.method == 'POST':
pass
# tree = request.data['tree']
# # commit = request.data['commit']
# message = request.data['message']
# # POST the tree from the client
# createTreeUrl = 'https://api.github.com/repos/{}/{}/git/trees?access_token={}'
# createTreeUrl = createTreeUrl.format(owner, repo, accessToken)
# createTreeUrl = getAuthUrl(createTreeUrl)
# createTreeData = {
# 'tree': tree
# }
# createTreeData = json.dumps(createTreeData).encode('utf-8')
# with urlopen(createTreeUrl, createTreeData) as createTreeRes:
# resStr = createTreeRes.read().decode('utf-8')
# shaTree = json.loads(resStr)['sha']
# # Create a commit that points the tree
# createCommitUrl = 'https://api.github.com/repos/{}/{}/git/commits?access_token={}'
# createCommitUrl = createCommitUrl.format(owner, repo, accessToken)
# createCommitUrl = getAuthUrl(createCommitUrl)
# createCommitData = {
# 'message': message,
# 'tree': shaTree,
# 'parents': [commit]
# }
# createCommitData = json.dumps(createCommitData).encode('utf-8')
# with urlopen(createCommitUrl, createCommitData) as createCommitRes:
# resStr = createCommitRes.read().decode('utf-8')
# createCommitRes = json.loads(resStr)
# newCommit = createCommitRes['sha']
# updateRefRes = _updateReference(accessToken, owner, repo, branch, newCommit)
# return Response({
# 'updateRefRes': updateRefRes,
# # 'branch': branch,
# 'newCommit': newCommit
# })
elif request.method == 'GET':
# GET the tree structure of the project repository
repoTree = _getRepoTree(accessToken, owner, repo, branch, commit)
        # Set aside a raw copy of the tree
        # to include in the response
tree = deepcopy(repoTree)
#
# Resolve path strings:
# Trees from GitHub have meta data on path.
        #   Build the recursive file structure using that metadata.
#
paths = list(set([file['path'] for file in repoTree['tree'] if file['type']=='tree']))
# paths must have path strings in descending order: deepest first!
paths = sorted(paths, key=lambda x:len(x.split('/')), reverse=True)
def pathToDict(pathString):
"""
            Convert a single path string into a nested dictionary.
"""
pathList = pathString.split('/')
pathList.reverse()
stack = [{}]
for path in pathList:
stack.append({path: stack.pop()})
return stack[0]
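        # For example, pathToDict('a/b/c') returns {'a': {'b': {'c': {}}}}.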
def mergeDicts(d1, d2):
"""
Merge two nested dictionaries into one.
"""
if d1 == {}:
return d2
if d2 == {}:
return d1
res = {}
commonKeys = list(set(d1.keys()) & set(d2.keys()))
for commonKey in commonKeys:
res[commonKey] = mergeDicts(d1[commonKey], d2[commonKey])
for key in list(d1.keys()):
if not key in commonKeys:
res[key] = d1[key]
for key in list(d2.keys()):
if not key in commonKeys:
res[key] = d2[key]
return res
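        # For example, mergeDicts({'a': {'b': {}}}, {'a': {'c': {}}, 'd': {}})
        # returns {'a': {'b': {}, 'c': {}}, 'd': {}}.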
def traversFs(fsDict, stack, f):
"""
            Traverses fsDict in place, filling each folder with its files so that
            entries end up shaped like:
            {
                "file-or-folder-name": {"originalContent": ..., "modified": ..., ...}
            }
"""
for folder in list(fsDict.keys()):
stack.append(folder)
traversFs(fsDict[folder], stack, f)
filesInFolder = [file for file in f if file['type'] != 'tree' and file['path'].startswith('/'.join(stack))]
# print('{}: {}'.format('/'.join(stack), filesInFolder))
for file in filesInFolder:
name = file['name']
file['originalContent'] = None
file['modified'] = False
file['added'] = False
fsDict[folder][name] = file
f.remove(file)
stack.pop()
def transformFs(fsDict, res, parentPath):
def _isFolder(x):
if not 'originalContent' in x:
return True
else:
return False
res['nodes'] = []
for key in fsDict:
if _isFolder(fsDict[key]):
path = '/'.join([parentPath, key])
thisFolder = {'name': key, 'path': path, 'type': 'tree'}
res['nodes'].append(transformFs(fsDict[key], thisFolder, path))
else:
fsDict[key]['nodes'] = []
res['nodes'].append(fsDict[key])
return res
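        # transformFs emits view-friendly folder nodes shaped like
        #   {'name': 'docs', 'path': '/docs', 'type': 'tree', 'nodes': [...]}
        # while file entries keep their tree metadata and get an empty 'nodes' list.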
pathDicts = []
fs = {}
for path in paths:
pathDict = pathToDict(path)
pathDicts.append(pathDict)
for pathDict in pathDicts:
fs = mergeDicts(fs, pathDict)
# Fill out the file structure with files.
traversFs(fs, [], repoTree['tree'])
# Add files from root
rootFiles = [file for file in repoTree['tree'] if file['path'] == file['name'] and file['type'] != 'tree']
for file in rootFiles:
name = file['name']
file['originalContent'] = None
file['modified'] = False
file['added'] = False
fs[file['name']] = file
# Transform fs for view-friendly form with recursive structure
recursiveTree = transformFs(fs, {}, '')
return Response({ 'recursiveTree': recursiveTree, 'tree': tree })
@api_view(['GET', 'POST'])
def blob(request, owner, repo, sha=None):
"""
    GET the specified blob and return it
POST a blob to create it on GitHub
"""
accessToken = request.session['accessToken']
if request.method == 'GET':
# Get a blob
if sha:
getBlobUrl = 'https://api.github.com/repos/{}/{}/git/blobs/{}?access_token={}'
getBlobUrl = getBlobUrl.format(owner, repo, sha, accessToken)
getBlobUrl = getAuthUrl(getBlobUrl)
with urlopen(getBlobUrl) as blobRes:
resStr = blobRes.read().decode('utf-8')
return Response({ 'blob': json.loads(resStr) })
else:
return Response({ 'error': 'sha should be specified' })
elif request.method == 'POST':
        # Create a blob
content = request.data['originalContent']
encoding = 'utf-8'
if 'encoding' in request.data:
encoding = request.data['encoding']
createBlobUrl = 'https://api.github.com/repos/{}/{}/git/blobs?access_token={}'
createBlobUrl = createBlobUrl.format(owner, repo, accessToken)
createBlobUrl = getAuthUrl(createBlobUrl)
createBlobData = { 'content': content, 'encoding': encoding }
createBlobData = json.dumps(createBlobData).encode('utf-8')
with urlopen(createBlobUrl, createBlobData) as createBlobRes:
resStr = createBlobRes.read().decode('utf-8')
createBlobRes = json.loads(resStr)
return Response({
'url': createBlobRes['url'],
'sha': createBlobRes['sha']
})
@api_view(['POST'])
def pr(request):
repositoryFullName = request.data['repository']
head = request.data['head']
base = request.data['base']
title = request.data['pullReqTitle']
body = request.data['pullReqBody']
accessToken = request.session['accessToken']
# POST the pull request
createPullReqUrl = 'https://api.github.com/repos/{}/pulls?access_token={}'
createPullReqUrl = createPullReqUrl.format(repositoryFullName, accessToken)
createPullReqUrl = getAuthUrl(createPullReqUrl)
createPullReqData = {
'title': title,
'head': head,
'base': base,
'body': body
}
createPullReqData = json.dumps(createPullReqData).encode('utf-8')
req = Request(
url=createPullReqUrl, headers={'Content-Type': 'application/json'},
data=createPullReqData, method='POST')
try:
with urlopen(req) as createPullReqRes:
resStr = createPullReqRes.read().decode('utf-8')
return Response({
'createPullReqRes': json.loads(resStr),
'code': createPullReqRes.getcode()
})
except HTTPError as ex:
for e in ex:
e = e.decode('utf-8')
e = json.loads(e)
errors = e['errors']
for error in errors:
if error['resource'] == 'PullRequest' and error['message'].startswith('A pull request already exists for'):
# Pull request already exists for this branch
# Just return
return Response({
'createPullReqRes': None,
'code': None,
'message': 'A pull request already exists.'
})
# POST the pull request
# createPullReqUrl = 'https://api.github.com/repos/{}/pulls?access_token={}'
# createPullReqUrl = createPullReqUrl.format(repositoryFullName, accessToken)
# createPullReqUrl = getAuthUrl(createPullReqUrl)
# createPullReqData = {
# 'title': title,
# 'head': head,
# 'base': base,
# 'body': body
# }
# createPullReqData = json.dumps(createPullReqData).encode('utf-8')
# with urlopen(createPullReqUrl, createPullReqData) as createPullReqRes:
# resStr = createPullReqRes.read().decode('utf-8')
# return Response({
# 'createPullReqRes': json.loads(resStr),
# 'code': createPullReqRes.getcode()
# })
@api_view(['POST'])
def renderFile(request):
"""
    Respond with rendered HTML.
    Consider using the Misaka (https://github.com/FSX/misaka) package,
    which is closer to a GitHub-flavored markdown renderer.
"""
file = request.data['file']
fileName = file['name']
extension = fileName.split('.')
res = { 'srcDoc': None }
    # Decide the file type based on the file extension
if len(extension) > 1:
extension = extension[-1]
else:
# No extension: it shouldn't be a folder, though
extension = None
if extension in ['md', 'markdown', 'mdown', 'mkdn', 'mkd']:
# Markdown: file rendering
if 'newContent' in file and file['newContent']:
res['srcDoc'] = _mdToHtml(file['newContent'])
else:
res['srcDoc'] = _mdToHtml(file['originalContent'])
# elif extension in ['html', 'htm']:
# # HTML: site-wide rendering
# if 'newContent' in file and file['newContent']:
# res['src'] = True
# else:
# res['src'] = True
else:
# Unsupported file type
pass
return Response(res)
@api_view(['POST'])
def hardclone(request):
cloneUrl = request.data['cloneUrl']
repositoryFullName = request.data['repository'] # Full name means :owner/:repo_name
branch = request.data['branch']
username = request.session['username'].split('@')[0]
# Create dirs
userBasePathStr = os.path.join(settings.MEDIA_ROOT, 'repos', repositoryFullName, branch, username)
userBasePath = pathlib.Path(userBasePathStr)
if userBasePath.exists():
if userBasePath.is_file():
userBasePath.unlink()
elif userBasePath.is_dir():
shutil.rmtree(userBasePathStr, ignore_errors=True)
userBasePath.mkdir(mode=0o777, parents=True)
# Clone project
cloneCommand = 'git clone -b {} {} {}'.format(branch, cloneUrl, userBasePathStr)
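    # The assembled command is equivalent to running, e.g.:
    #   git clone -b <branch> <cloneUrl> <MEDIA_ROOT>/repos/<owner>/<repo>/<branch>/<username>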
cloneCompProc = subprocess.run(
cloneCommand.split(), stdin=subprocess.PIPE,
input=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
return Response({
'args': cloneCompProc.args,
'returncode': cloneCompProc.returncode,
'stdout': cloneCompProc.stdout,
'stderr': cloneCompProc.stderr
})
@api_view(['POST'])
def newFile(request):
repositoryFullName = request.data['repository'] # Full name means :owner/:repo_name
branch = request.data['branch']
path = request.data['path']
fileName = request.data['fileName']
fileOrFolder = request.data['fileOrFolder']
username = request.session['username'].split('@')[0]
userBasePathStr = os.path.join(settings.MEDIA_ROOT, 'repos', repositoryFullName, branch, username)
if fileOrFolder == 'file':
# Create a text file
newFilePath = pathlib.Path(userBasePathStr) / path / fileName
newFile = {}
newFile['path'] = str(newFilePath)
newFile['path'] = newFile['path'].replace(userBasePathStr, '')
newFile['path'] = newFile['path'][1:] # To remove the leading /
newFile['name'] = os.path.basename(newFile['path'])
newFile['nodes'] = []
newFile['added'] = True
newFile['modified'] = False
newFile['sha'] = None
newFile['url'] = None
newFile['type'] = 'blob'
newFile['mode'] = '100644'
newFile['originalContent'] = ''
newFile['size'] = 0
with open(str(newFilePath), 'w') as nf:
nf.write(newFile['originalContent'])
return Response({
'res': newFilePath.exists(),
'createdFiles': [newFile]
})
elif fileOrFolder == 'folder':
# Create a folder
newFolderPath = pathlib.Path(userBasePathStr) / path / fileName
newFolderPath.mkdir(mode=0o777, parents=True)
newFolder = {
'path': str(newFolderPath).replace(userBasePathStr + '/', ''),
'name': fileName,
'nodes': [],
'type': 'tree',
'mode': '040000'
}
return Response({
'res': newFolderPath.exists(),
'createdFiles': [newFolder]
})
@api_view(['POST'])
@parser_classes((FormParser, MultiPartParser, ))
def uploadFile(request):
"""
Currently supports uploading one file
"""
repositoryFullName = request.data['repository'] # Full name means :owner/:repo_name
branch = request.data['branch']
path = request.data['path']
file = request.data['files']
username = request.session['username'].split('@')[0]
userBasePathStr = os.path.join(settings.MEDIA_ROOT, 'repos', repositoryFullName, branch, username)
# TODO
uploadedFilePath = pathlib.Path(userBasePathStr) / path / file.name
fileContent = file.read()
with open(str(uploadedFilePath), 'wb') as fo:
fo.write(fileContent)
newFile = {}
newFile['path'] = str(uploadedFilePath)
newFile['path'] = newFile['path'].replace(userBasePathStr, '')
newFile['path'] = newFile['path'][1:] # To remove the leading /
newFile['name'] = os.path.basename(newFile['path'])
newFile['nodes'] = []
newFile['added'] = True
newFile['modified'] = False
newFile['sha'] = None
newFile['url'] = None
newFile['type'] = 'blob'
newFile['mode'] = '100644'
    # Match the encoding/decoding scheme used for blobs served through the GitHub API
newFile['originalContent'] = base64.b64encode(fileContent).decode('utf-8')
# The if block below didn't work for uploaded text files
# (worked for existing text, binary, and uploaded binary, though)
# if _isBinary((newFile['name'])):
# newFile['originalContent'] = base64.b64encode(fileContent).decode('utf-8')
# else:
# newFile['originalContent'] = fileContent.decode('utf-8')
newFile['size'] = os.stat(str(uploadedFilePath)).st_size
return Response({
'res': uploadedFilePath.exists(),
'createdFiles': [newFile],
'size': newFile['size']
})
@api_view(['POST'])
def updateFile(request):
"""
    Currently supports text files (not binary)
"""
repositoryFullName = request.data['repository'] # Full name means :owner/:repo_name
branch = request.data['branch']
filePath = request.data['filePath']
newVal = request.data['newVal']
username = request.session['username'].split('@')[0]
userBasePathStr = os.path.join(settings.MEDIA_ROOT, 'repos', repositoryFullName, branch, username)
actualPath = pathlib.Path(userBasePathStr) / filePath
with open(str(actualPath), 'w') as fo:
fo.write(newVal)
return Response({
'res': actualPath.exists(),
'size': os.stat(str(actualPath)).st_size
})
@api_view(['POST'])
def manipulateFile(request):
accessToken = request.session['accessToken']
manipulation = request.data['manipulation']
source = request.data['source']
targetPath = request.data['targetPath']
repositoryFullName = request.data['repository'] # Full name means :owner/:repo_name
branch = request.data['branch']
username = request.session['username'].split('@')[0]
    # Resolve the user's working directory
userBasePathStr = os.path.join(settings.MEDIA_ROOT, 'repos', repositoryFullName, branch, username)
userBasePath = pathlib.Path(userBasePathStr)
# Manipulate file
if manipulation == 'rename':
sourcePath = str(userBasePath / source['path'])
targetPath = str(userBasePath / targetPath)
shutil.move(sourcePath, targetPath)
        # Return the renamed file's content so the client can update it
fileContent = None
with open(targetPath, 'rb') as f:
fileContent = f.read()
content = base64.b64encode(fileContent).decode('utf-8')
return Response({
'content': content
})
elif manipulation == 'delete':
targetPath = str(userBasePath / targetPath)
if os.path.isfile(targetPath):
os.remove(targetPath)
else:
shutil.rmtree(targetPath, ignore_errors=True)
return Response({
'targetPath': targetPath
})
elif manipulation == 'copy':
sourcePath = str(userBasePath / source['path'])
targetPath = str(userBasePath / targetPath)
dupName = ''
if sourcePath == targetPath + '/' + source['name']:
# Duplicate in the same folder
nameSplit = source['name'].split('.')
if len(nameSplit) > 1:
# nameSplit.append(nameSplit[-1])
nameSplit[-2] += ' copy'
dupName = '.'.join(nameSplit)
else:
dupName = source['name'] + ' copy'
else:
# Copy to another folder
dupName = source['name']
targetPath += '/' + dupName
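        # For example, duplicating 'report.txt' within its own folder yields
        # 'report copy.txt'; copying it into another folder keeps 'report.txt'.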
res = shutil.copy(sourcePath, targetPath)
        # Return the copied file's content so the client can update it
fileContent = None
with open(targetPath, 'rb') as f:
fileContent = f.read()
content = base64.b64encode(fileContent).decode('utf-8')
return Response({
'content': content,
'targetPath': res.replace(userBasePathStr + '/', ''),
'targetName': os.path.basename(res)
})
| stlim0730/glide | api/views.py | Python | mit | 35,037 | 0.013643 |
# implement samba_tool drs commands
#
# Copyright Andrew Tridgell 2010
# Copyright Andrew Bartlett 2017
#
# based on C implementation by Kamen Mazdrashki <kamen.mazdrashki@postpath.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import samba.getopt as options
import ldb
import logging
from . import common
import json
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
Option,
SuperCommand,
)
from samba.samdb import SamDB
from samba import drs_utils, nttime2string, dsdb
from samba.dcerpc import drsuapi, misc
from samba.join import join_clone
from samba import colour
from samba.uptodateness import (
get_partition_maps,
get_utdv_edges,
get_utdv_distances,
get_utdv_summary,
get_kcc_and_dsas,
)
from samba.common import get_string
from samba.samdb import get_default_backend_store
def drsuapi_connect(ctx):
'''make a DRSUAPI connection to the server'''
try:
(ctx.drsuapi, ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drsuapi_connect(ctx.server, ctx.lp, ctx.creds)
except Exception as e:
raise CommandError("DRS connection to %s failed" % ctx.server, e)
def samdb_connect(ctx):
'''make a ldap connection to the server'''
try:
ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
session_info=system_session(),
credentials=ctx.creds, lp=ctx.lp)
except Exception as e:
raise CommandError("LDAP connection to %s failed" % ctx.server, e)
def drs_errmsg(werr):
'''return "was successful" or an error string'''
(ecode, estring) = werr
if ecode == 0:
return "was successful"
return "failed, result %u (%s)" % (ecode, estring)
def attr_default(msg, attrname, default):
'''get an attribute from a ldap msg with a default'''
if attrname in msg:
return msg[attrname][0]
return default
def drs_parse_ntds_dn(ntds_dn):
'''parse a NTDS DN returning a site and server'''
a = ntds_dn.split(',')
if a[0] != "CN=NTDS Settings" or a[2] != "CN=Servers" or a[4] != 'CN=Sites':
raise RuntimeError("bad NTDS DN %s" % ntds_dn)
server = a[1].split('=')[1]
site = a[3].split('=')[1]
return (site, server)
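# For example, given an NTDS DN like
#   "CN=NTDS Settings,CN=DC1,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=example,DC=com"
# this returns ('Default-First-Site-Name', 'DC1').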
DEFAULT_SHOWREPL_FORMAT = 'classic'
class cmd_drs_showrepl(Command):
"""Show replication status."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("--json", help="replication details in JSON format",
dest='format', action='store_const', const='json'),
Option("--summary", help=("summarize overall DRS health as seen "
"from this server"),
dest='format', action='store_const', const='summary'),
        Option("--pull-summary", help=("Have we successfully replicated "
                                       "from all relevant servers?"),
dest='format', action='store_const', const='pull_summary'),
Option("--notify-summary", action='store_const',
const='notify_summary', dest='format',
               help=("Have we successfully notified all relevant servers of "
"local changes, and did they say they successfully "
"replicated?")),
Option("--classic", help="print local replication details",
dest='format', action='store_const', const='classic',
default=DEFAULT_SHOWREPL_FORMAT),
Option("-v", "--verbose", help="Be verbose", action="store_true"),
Option("--color", help="Use colour output (yes|no|auto)",
default='no'),
]
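    # Illustrative invocations of this subcommand (host names are examples only):
    #   samba-tool drs showrepl DC1 --summary --color=auto
    #   samba-tool drs showrepl --json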
takes_args = ["DC?"]
def parse_neighbour(self, n):
"""Convert an ldb neighbour object into a python dictionary"""
dsa_objectguid = str(n.source_dsa_obj_guid)
d = {
'NC dn': n.naming_context_dn,
"DSA objectGUID": dsa_objectguid,
"last attempt time": nttime2string(n.last_attempt),
"last attempt message": drs_errmsg(n.result_last_attempt),
"consecutive failures": n.consecutive_sync_failures,
"last success": nttime2string(n.last_success),
"NTDS DN": str(n.source_dsa_obj_dn),
'is deleted': False
}
try:
self.samdb.search(base="<GUID=%s>" % dsa_objectguid,
scope=ldb.SCOPE_BASE,
attrs=[])
except ldb.LdbError as e:
(errno, _) = e.args
if errno == ldb.ERR_NO_SUCH_OBJECT:
d['is deleted'] = True
else:
raise
try:
(site, server) = drs_parse_ntds_dn(n.source_dsa_obj_dn)
d["DSA"] = "%s\\%s" % (site, server)
except RuntimeError:
pass
return d
def print_neighbour(self, d):
'''print one set of neighbour information'''
self.message("%s" % d['NC dn'])
if 'DSA' in d:
self.message("\t%s via RPC" % d['DSA'])
else:
self.message("\tNTDS DN: %s" % d['NTDS DN'])
self.message("\t\tDSA object GUID: %s" % d['DSA objectGUID'])
self.message("\t\tLast attempt @ %s %s" % (d['last attempt time'],
d['last attempt message']))
self.message("\t\t%u consecutive failure(s)." %
d['consecutive failures'])
self.message("\t\tLast success @ %s" % d['last success'])
self.message("")
def get_neighbours(self, info_type):
req1 = drsuapi.DsReplicaGetInfoRequest1()
req1.info_type = info_type
try:
(info_type, info) = self.drsuapi.DsReplicaGetInfo(
self.drsuapi_handle, 1, req1)
except Exception as e:
raise CommandError("DsReplicaGetInfo of type %u failed" % info_type, e)
reps = [self.parse_neighbour(n) for n in info.array]
return reps
def run(self, DC=None, sambaopts=None,
credopts=None, versionopts=None,
format=DEFAULT_SHOWREPL_FORMAT,
verbose=False, color='no'):
self.apply_colour_choice(color)
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
self.verbose = verbose
output_function = {
'summary': self.summary_output,
'notify_summary': self.notify_summary_output,
'pull_summary': self.pull_summary_output,
'json': self.json_output,
'classic': self.classic_output,
}.get(format)
if output_function is None:
raise CommandError("unknown showrepl format %s" % format)
return output_function()
def json_output(self):
data = self.get_local_repl_data()
del data['site']
del data['server']
json.dump(data, self.outf, indent=2)
    def summary_output_handler(self, typeof_output):
        """Print a short message if everything seems fine, but print details of any
links that seem broken."""
failing_repsto = []
failing_repsfrom = []
local_data = self.get_local_repl_data()
if typeof_output != "pull_summary":
for rep in local_data['repsTo']:
if rep['is deleted']:
continue
if rep["consecutive failures"] != 0 or rep["last success"] == 0:
failing_repsto.append(rep)
if typeof_output != "notify_summary":
for rep in local_data['repsFrom']:
if rep['is deleted']:
continue
if rep["consecutive failures"] != 0 or rep["last success"] == 0:
failing_repsfrom.append(rep)
if failing_repsto or failing_repsfrom:
self.message(colour.c_RED("There are failing connections"))
if failing_repsto:
self.message(colour.c_RED("Failing outbound connections:"))
for rep in failing_repsto:
self.print_neighbour(rep)
if failing_repsfrom:
                self.message(colour.c_RED("Failing inbound connections:"))
for rep in failing_repsfrom:
self.print_neighbour(rep)
return 1
self.message(colour.c_GREEN("[ALL GOOD]"))
def summary_output(self):
return self.summary_output_handler("summary")
def notify_summary_output(self):
return self.summary_output_handler("notify_summary")
def pull_summary_output(self):
return self.summary_output_handler("pull_summary")
def get_local_repl_data(self):
drsuapi_connect(self)
samdb_connect(self)
# show domain information
ntds_dn = self.samdb.get_dsServiceName()
(site, server) = drs_parse_ntds_dn(ntds_dn)
try:
ntds = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=['options', 'objectGUID', 'invocationId'])
except Exception as e:
            raise CommandError("Failed to search NTDS DN %s" % ntds_dn, e)
dsa_details = {
"options": int(attr_default(ntds[0], "options", 0)),
"objectGUID": get_string(self.samdb.schema_format_value(
"objectGUID", ntds[0]["objectGUID"][0])),
"invocationId": get_string(self.samdb.schema_format_value(
"objectGUID", ntds[0]["invocationId"][0]))
}
conn = self.samdb.search(base=ntds_dn, expression="(objectClass=nTDSConnection)")
repsfrom = self.get_neighbours(drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS)
repsto = self.get_neighbours(drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO)
conn_details = []
for c in conn:
c_rdn, sep, c_server_dn = str(c['fromServer'][0]).partition(',')
d = {
'name': str(c['name']),
'remote DN': str(c['fromServer'][0]),
'options': int(attr_default(c, 'options', 0)),
'enabled': (get_string(attr_default(c, 'enabledConnection',
'TRUE')).upper() == 'TRUE')
}
conn_details.append(d)
try:
c_server_res = self.samdb.search(base=c_server_dn,
scope=ldb.SCOPE_BASE,
attrs=["dnsHostName"])
d['dns name'] = str(c_server_res[0]["dnsHostName"][0])
except ldb.LdbError as e:
(errno, _) = e.args
if errno == ldb.ERR_NO_SUCH_OBJECT:
d['is deleted'] = True
except (KeyError, IndexError):
pass
d['replicates NC'] = []
for r in c.get('mS-DS-ReplicatesNCReason', []):
a = str(r).split(':')
d['replicates NC'].append((a[3], int(a[2])))
return {
'dsa': dsa_details,
'repsFrom': repsfrom,
'repsTo': repsto,
'NTDSConnections': conn_details,
'site': site,
'server': server
}
def classic_output(self):
data = self.get_local_repl_data()
dsa_details = data['dsa']
repsfrom = data['repsFrom']
repsto = data['repsTo']
conn_details = data['NTDSConnections']
site = data['site']
server = data['server']
self.message("%s\\%s" % (site, server))
self.message("DSA Options: 0x%08x" % dsa_details["options"])
self.message("DSA object GUID: %s" % dsa_details["objectGUID"])
self.message("DSA invocationId: %s\n" % dsa_details["invocationId"])
self.message("==== INBOUND NEIGHBORS ====\n")
for n in repsfrom:
self.print_neighbour(n)
self.message("==== OUTBOUND NEIGHBORS ====\n")
for n in repsto:
self.print_neighbour(n)
reasons = ['NTDSCONN_KCC_GC_TOPOLOGY',
'NTDSCONN_KCC_RING_TOPOLOGY',
'NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY',
'NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY',
'NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY',
'NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY',
'NTDSCONN_KCC_INTERSITE_TOPOLOGY',
'NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY',
'NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY',
'NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY']
self.message("==== KCC CONNECTION OBJECTS ====\n")
for d in conn_details:
self.message("Connection --")
if d.get('is deleted'):
self.message("\tWARNING: Connection to DELETED server!")
self.message("\tConnection name: %s" % d['name'])
self.message("\tEnabled : %s" % str(d['enabled']).upper())
self.message("\tServer DNS name : %s" % d.get('dns name'))
self.message("\tServer DN name : %s" % d['remote DN'])
self.message("\t\tTransportType: RPC")
self.message("\t\toptions: 0x%08X" % d['options'])
if d['replicates NC']:
for nc, reason in d['replicates NC']:
self.message("\t\tReplicatesNC: %s" % nc)
self.message("\t\tReason: 0x%08x" % reason)
for s in reasons:
if getattr(dsdb, s, 0) & reason:
self.message("\t\t\t%s" % s)
else:
self.message("Warning: No NC replicated for Connection!")
class cmd_drs_kcc(Command):
"""Trigger knowledge consistency center run."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
def run(self, DC=None, sambaopts=None,
credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
drsuapi_connect(self)
req1 = drsuapi.DsExecuteKCC1()
try:
self.drsuapi.DsExecuteKCC(self.drsuapi_handle, 1, req1)
except Exception as e:
raise CommandError("DsExecuteKCC failed", e)
self.message("Consistency check on %s successful." % DC)
class cmd_drs_replicate(Command):
"""Replicate a naming context between two DCs."""
synopsis = "%prog <destinationDC> <sourceDC> <NC> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DEST_DC", "SOURCE_DC", "NC"]
takes_options = [
Option("--add-ref", help="use ADD_REF to add to repsTo on source", action="store_true"),
Option("--sync-forced", help="use SYNC_FORCED to force inbound replication", action="store_true"),
Option("--sync-all", help="use SYNC_ALL to replicate from all DCs", action="store_true"),
Option("--full-sync", help="resync all objects", action="store_true"),
Option("--local", help="pull changes directly into the local database (destination DC is ignored)", action="store_true"),
Option("--local-online", help="pull changes into the local database (destination DC is ignored) as a normal online replication", action="store_true"),
Option("--async-op", help="use ASYNC_OP for the replication", action="store_true"),
Option("--single-object", help="Replicate only the object specified, instead of the whole Naming Context (only with --local)", action="store_true"),
]
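    # Illustrative invocation (host names and NC are examples only):
    #   samba-tool drs replicate DC2 DC1 DC=example,DC=com --full-sync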
def drs_local_replicate(self, SOURCE_DC, NC, full_sync=False,
single_object=False,
sync_forced=False):
'''replicate from a source DC to the local SAM'''
self.server = SOURCE_DC
drsuapi_connect(self)
# Override the default flag LDB_FLG_DONT_CREATE_DB
self.local_samdb = SamDB(session_info=system_session(), url=None,
credentials=self.creds, lp=self.lp,
flags=0)
self.samdb = SamDB(url="ldap://%s" % self.server,
session_info=system_session(),
credentials=self.creds, lp=self.lp)
# work out the source and destination GUIDs
res = self.local_samdb.search(base="", scope=ldb.SCOPE_BASE,
attrs=["dsServiceName"])
self.ntds_dn = res[0]["dsServiceName"][0]
res = self.local_samdb.search(base=self.ntds_dn, scope=ldb.SCOPE_BASE,
attrs=["objectGUID"])
self.ntds_guid = misc.GUID(
self.samdb.schema_format_value("objectGUID",
res[0]["objectGUID"][0]))
source_dsa_invocation_id = misc.GUID(self.samdb.get_invocation_id())
dest_dsa_invocation_id = misc.GUID(self.local_samdb.get_invocation_id())
destination_dsa_guid = self.ntds_guid
exop = drsuapi.DRSUAPI_EXOP_NONE
if single_object:
exop = drsuapi.DRSUAPI_EXOP_REPL_OBJ
full_sync = True
self.samdb.transaction_start()
repl = drs_utils.drs_Replicate("ncacn_ip_tcp:%s[seal]" % self.server,
self.lp,
self.creds, self.local_samdb,
dest_dsa_invocation_id)
# Work out if we are an RODC, so that a forced local replicate
# with the admin pw does not sync passwords
rodc = self.local_samdb.am_rodc()
try:
(num_objects, num_links) = repl.replicate(NC,
source_dsa_invocation_id,
destination_dsa_guid,
rodc=rodc,
full_sync=full_sync,
exop=exop,
sync_forced=sync_forced)
except Exception as e:
raise CommandError("Error replicating DN %s" % NC, e)
self.samdb.transaction_commit()
if full_sync:
self.message("Full Replication of all %d objects and %d links "
"from %s to %s was successful." %
(num_objects, num_links, SOURCE_DC,
self.local_samdb.url))
else:
self.message("Incremental replication of %d objects and %d links "
"from %s to %s was successful." %
(num_objects, num_links, SOURCE_DC,
self.local_samdb.url))
def run(self, DEST_DC, SOURCE_DC, NC,
add_ref=False, sync_forced=False, sync_all=False, full_sync=False,
local=False, local_online=False, async_op=False, single_object=False,
sambaopts=None, credopts=None, versionopts=None):
self.server = DEST_DC
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
if local:
self.drs_local_replicate(SOURCE_DC, NC, full_sync=full_sync,
single_object=single_object,
sync_forced=sync_forced)
return
if local_online:
server_bind = drsuapi.drsuapi("irpc:dreplsrv", lp_ctx=self.lp)
server_bind_handle = misc.policy_handle()
else:
drsuapi_connect(self)
server_bind = self.drsuapi
server_bind_handle = self.drsuapi_handle
if not async_op:
            # Allow the synchronous replication 5 minutes
server_bind.request_timeout = 5 * 60
samdb_connect(self)
# we need to find the NTDS GUID of the source DC
msg = self.samdb.search(base=self.samdb.get_config_basedn(),
expression="(&(objectCategory=server)(|(name=%s)(dNSHostName=%s)))" % (
ldb.binary_encode(SOURCE_DC),
ldb.binary_encode(SOURCE_DC)),
attrs=[])
if len(msg) == 0:
raise CommandError("Failed to find source DC %s" % SOURCE_DC)
server_dn = msg[0]['dn']
msg = self.samdb.search(base=server_dn, scope=ldb.SCOPE_ONELEVEL,
expression="(|(objectCategory=nTDSDSA)(objectCategory=nTDSDSARO))",
attrs=['objectGUID', 'options'])
if len(msg) == 0:
raise CommandError("Failed to find source NTDS DN %s" % SOURCE_DC)
source_dsa_guid = msg[0]['objectGUID'][0]
dsa_options = int(attr_default(msg, 'options', 0))
req_options = 0
if not (dsa_options & dsdb.DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL):
req_options |= drsuapi.DRSUAPI_DRS_WRIT_REP
if add_ref:
req_options |= drsuapi.DRSUAPI_DRS_ADD_REF
if sync_forced:
req_options |= drsuapi.DRSUAPI_DRS_SYNC_FORCED
if sync_all:
req_options |= drsuapi.DRSUAPI_DRS_SYNC_ALL
if full_sync:
req_options |= drsuapi.DRSUAPI_DRS_FULL_SYNC_NOW
if async_op:
req_options |= drsuapi.DRSUAPI_DRS_ASYNC_OP
try:
drs_utils.sendDsReplicaSync(server_bind, server_bind_handle, source_dsa_guid, NC, req_options)
except drs_utils.drsException as estr:
raise CommandError("DsReplicaSync failed", estr)
if async_op:
self.message("Replicate from %s to %s was started." % (SOURCE_DC, DEST_DC))
else:
self.message("Replicate from %s to %s was successful." % (SOURCE_DC, DEST_DC))
class cmd_drs_bind(Command):
"""Show DRS capabilities of a server."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
def run(self, DC=None, sambaopts=None,
credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
drsuapi_connect(self)
bind_info = drsuapi.DsBindInfoCtr()
bind_info.length = 28
bind_info.info = drsuapi.DsBindInfo28()
(info, handle) = self.drsuapi.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
optmap = [
("DRSUAPI_SUPPORTED_EXTENSION_BASE", "DRS_EXT_BASE"),
("DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION", "DRS_EXT_ASYNCREPL"),
("DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI", "DRS_EXT_REMOVEAPI"),
("DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2", "DRS_EXT_MOVEREQ_V2"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS", "DRS_EXT_GETCHG_DEFLATE"),
("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1", "DRS_EXT_DCINFO_V1"),
("DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION", "DRS_EXT_RESTORE_USN_OPTIMIZATION"),
("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY", "DRS_EXT_ADDENTRY"),
("DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE", "DRS_EXT_KCC_EXECUTE"),
("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2", "DRS_EXT_ADDENTRY_V2"),
("DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION", "DRS_EXT_LINKED_VALUE_REPLICATION"),
("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2", "DRS_EXT_DCINFO_V2"),
("DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD", "DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD"),
("DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND", "DRS_EXT_CRYPTO_BIND"),
("DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO", "DRS_EXT_GET_REPL_INFO"),
("DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION", "DRS_EXT_STRONG_ENCRYPTION"),
("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01", "DRS_EXT_DCINFO_VFFFFFFFF"),
("DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP", "DRS_EXT_TRANSITIVE_MEMBERSHIP"),
("DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY", "DRS_EXT_ADD_SID_HISTORY"),
("DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3", "DRS_EXT_POST_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5", "DRS_EXT_GETCHGREQ_V5"),
("DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2", "DRS_EXT_GETMEMBERSHIPS2"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6", "DRS_EXT_GETCHGREQ_V6"),
("DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS", "DRS_EXT_NONDOMAIN_NCS"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8", "DRS_EXT_GETCHGREQ_V8"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5", "DRS_EXT_GETCHGREPLY_V5"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6", "DRS_EXT_GETCHGREPLY_V6"),
("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3", "DRS_EXT_WHISTLER_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7", "DRS_EXT_WHISTLER_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT", "DRS_EXT_WHISTLER_BETA3"),
("DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS", "DRS_EXT_W2K3_DEFLATE"),
("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10", "DRS_EXT_GETCHGREQ_V10"),
("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART2", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2"),
("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART3", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3")
]
optmap_ext = [
("DRSUAPI_SUPPORTED_EXTENSION_ADAM", "DRS_EXT_ADAM"),
("DRSUAPI_SUPPORTED_EXTENSION_LH_BETA2", "DRS_EXT_LH_BETA2"),
("DRSUAPI_SUPPORTED_EXTENSION_RECYCLE_BIN", "DRS_EXT_RECYCLE_BIN")]
self.message("Bind to %s succeeded." % DC)
self.message("Extensions supported:")
for (opt, str) in optmap:
optval = getattr(drsuapi, opt, 0)
if info.info.supported_extensions & optval:
yesno = "Yes"
else:
yesno = "No "
self.message(" %-60s: %s (%s)" % (opt, yesno, str))
if isinstance(info.info, drsuapi.DsBindInfo48):
self.message("\nExtended Extensions supported:")
for (opt, str) in optmap_ext:
optval = getattr(drsuapi, opt, 0)
if info.info.supported_extensions_ext & optval:
yesno = "Yes"
else:
yesno = "No "
self.message(" %-60s: %s (%s)" % (opt, yesno, str))
self.message("\nSite GUID: %s" % info.info.site_guid)
self.message("Repl epoch: %u" % info.info.repl_epoch)
if isinstance(info.info, drsuapi.DsBindInfo48):
self.message("Forest GUID: %s" % info.info.config_dn_guid)
class cmd_drs_options(Command):
"""Query or change 'options' for NTDS Settings object of a Domain Controller."""
synopsis = "%prog [<DC>] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_args = ["DC?"]
takes_options = [
Option("--dsa-option", help="DSA option to enable/disable", type="str",
metavar="{+|-}IS_GC | {+|-}DISABLE_INBOUND_REPL | {+|-}DISABLE_OUTBOUND_REPL | {+|-}DISABLE_NTDSCONN_XLATE"),
]
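    # For example, '--dsa-option=+IS_GC' ORs the IS_GC bit (0x00000001) from
    # option_map below into the NTDS Settings 'options' attribute, while
    # '--dsa-option=-IS_GC' clears that bit.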
option_map = {"IS_GC": 0x00000001,
"DISABLE_INBOUND_REPL": 0x00000002,
"DISABLE_OUTBOUND_REPL": 0x00000004,
"DISABLE_NTDSCONN_XLATE": 0x00000008}
def run(self, DC=None, dsa_option=None,
sambaopts=None, credopts=None, versionopts=None):
self.lp = sambaopts.get_loadparm()
if DC is None:
DC = common.netcmd_dnsname(self.lp)
self.server = DC
self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
samdb_connect(self)
ntds_dn = self.samdb.get_dsServiceName()
res = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=["options"])
dsa_opts = int(res[0]["options"][0])
# print out current DSA options
cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
self.message("Current DSA options: " + ", ".join(cur_opts))
# modify options
if dsa_option:
if dsa_option[:1] not in ("+", "-"):
raise CommandError("Unknown option %s" % dsa_option)
flag = dsa_option[1:]
if flag not in self.option_map.keys():
raise CommandError("Unknown option %s" % dsa_option)
if dsa_option[:1] == "+":
dsa_opts |= self.option_map[flag]
else:
dsa_opts &= ~self.option_map[flag]
# save new options
m = ldb.Message()
m.dn = ldb.Dn(self.samdb, ntds_dn)
m["options"] = ldb.MessageElement(str(dsa_opts), ldb.FLAG_MOD_REPLACE, "options")
self.samdb.modify(m)
# print out new DSA options
cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
self.message("New DSA options: " + ", ".join(cur_opts))
class cmd_drs_clone_dc_database(Command):
    """Replicate an initial clone of a domain, but DO NOT JOIN it."""
synopsis = "%prog <dnsdomain> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("--server", help="DC to join", type=str),
Option("--targetdir", help="where to store provision (required)", type=str),
Option("-q", "--quiet", help="Be quiet", action="store_true"),
Option("--include-secrets", help="Also replicate secret values", action="store_true"),
Option("--backend-store", type="choice", metavar="BACKENDSTORE",
choices=["tdb", "mdb"],
help="Specify the database backend to be used "
"(default is %s)" % get_default_backend_store()),
Option("--backend-store-size", type="bytes", metavar="SIZE",
               help="Specify the size of the backend database, currently " +
               "only supported by lmdb backends (default is 8 Gb).")
]
takes_args = ["domain"]
def run(self, domain, sambaopts=None, credopts=None,
versionopts=None, server=None, targetdir=None,
quiet=False, verbose=False, include_secrets=False,
backend_store=None, backend_store_size=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
logger = self.get_logger(verbose=verbose, quiet=quiet)
if targetdir is None:
raise CommandError("--targetdir option must be specified")
join_clone(logger=logger, server=server, creds=creds, lp=lp,
domain=domain, dns_backend='SAMBA_INTERNAL',
targetdir=targetdir, include_secrets=include_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
class cmd_drs_uptodateness(Command):
"""Show uptodateness status"""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", metavar="URL", dest="H",
help="LDB URL for database or target server"),
Option("-p", "--partition",
help="restrict to this partition"),
Option("--json", action='store_true',
help="Print data in json format"),
Option("--maximum", action='store_true',
help="Print maximum out-of-date-ness only"),
Option("--median", action='store_true',
help="Print median out-of-date-ness only"),
Option("--full", action='store_true',
help="Print full out-of-date-ness data"),
]
def format_as_json(self, partitions_summaries):
return json.dumps(partitions_summaries, indent=2)
def format_as_text(self, partitions_summaries):
lines = []
for part_name, summary in partitions_summaries.items():
items = ['%s: %s' % (k, v) for k, v in summary.items()]
line = '%-15s %s' % (part_name, ' '.join(items))
lines.append(line)
return '\n'.join(lines)
def run(self, H=None, partition=None,
json=False, maximum=False, median=False, full=False,
sambaopts=None, credopts=None, versionopts=None,
quiet=False, verbose=False):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
local_kcc, dsas = get_kcc_and_dsas(H, lp, creds)
samdb = local_kcc.samdb
short_partitions, _ = get_partition_maps(samdb)
if partition:
if partition in short_partitions:
part_dn = short_partitions[partition]
# narrow down to specified partition only
short_partitions = {partition: part_dn}
else:
raise CommandError("unknown partition %s" % partition)
filters = []
if maximum:
filters.append('maximum')
if median:
filters.append('median')
partitions_distances = {}
partitions_summaries = {}
for part_name, part_dn in short_partitions.items():
utdv_edges = get_utdv_edges(local_kcc, dsas, part_dn, lp, creds)
distances = get_utdv_distances(utdv_edges, dsas)
summary = get_utdv_summary(distances, filters=filters)
partitions_distances[part_name] = distances
partitions_summaries[part_name] = summary
if full:
# always print json format
output = self.format_as_json(partitions_distances)
else:
if json:
output = self.format_as_json(partitions_summaries)
else:
output = self.format_as_text(partitions_summaries)
print(output, file=self.outf)
class cmd_drs(SuperCommand):
"""Directory Replication Services (DRS) management."""
subcommands = {}
subcommands["bind"] = cmd_drs_bind()
subcommands["kcc"] = cmd_drs_kcc()
subcommands["replicate"] = cmd_drs_replicate()
subcommands["showrepl"] = cmd_drs_showrepl()
subcommands["options"] = cmd_drs_options()
subcommands["clone-dc-database"] = cmd_drs_clone_dc_database()
subcommands["uptodateness"] = cmd_drs_uptodateness()
| kernevil/samba | python/samba/netcmd/drs.py | Python | gpl-3.0 | 36,173 | 0.001603 |
#!/usr/bin/env python
from datetime import timedelta
import os
import random
from django.utils.dateparse import parse_date
from faker import Faker
test_email = 'michael.b001@gmx.de'
fake = Faker('de')
fake.seed(1)
random.seed(1)
def get_random_date():
return parse_date('1983-03-31') + timedelta(days=random.randint(-5000,
1000))
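# get_random_date() therefore yields dates roughly between July 1969 and December 1985.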
def populate():
for _ in range(100):
candidate = add_candidate(first_name=fake.first_name(),
last_name=fake.last_name(),
date_of_birth=get_random_date())
add_registration(candidate=candidate,
bicycle_kind=random.randint(1, 4),
email=fake.email())
def add_candidate(first_name, last_name, date_of_birth):
return Candidate.objects.create(first_name=first_name,
last_name=last_name,
date_of_birth=date_of_birth)
def add_registration(candidate, bicycle_kind, email):
return UserRegistration.objects.create(candidate=candidate,
bicycle_kind=bicycle_kind,
email=email)
def add_event(due_date):
return HandoutEvent.objects.create(due_date=due_date)
def add_bicycle():
b = Bicycle.objects.create()
return b
# Start execution here!
if __name__ == '__main__':
print("Starting FIRST_APP population script...")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bwb.settings')
import django
django.setup()
from register.models import UserRegistration, Candidate, Bicycle
from register.models import HandoutEvent
populate()
| michaelbratsch/bwb | populate.py | Python | gpl-3.0 | 1,775 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to bridge `Distribution`s and `tf.contrib.learn.estimator` APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators.head import _compute_weighted_loss
from tensorflow.contrib.learn.python.learn.estimators.head import _RegressionHead
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
__all__ = [
"estimator_head_distribution_regression",
]
def estimator_head_distribution_regression(make_distribution_fn,
label_dimension=1,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for regression under a generic distribution.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the last
dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None` if
label is a `Tensor` (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure learns
the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and metrics
keys are suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
Returns:
An instance of `Head` for generic regression.
"""
return _DistributionRegressionHead(
make_distribution_fn=make_distribution_fn,
label_dimension=label_dimension,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
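# A hypothetical usage sketch (not part of the original module), assuming the
# contemporaneous tf.contrib.distributions API:
#
#   from tensorflow.contrib import distributions as tfd
#
#   head = estimator_head_distribution_regression(
#       make_distribution_fn=lambda logits: tfd.Normal(loc=logits, scale=1.),
#       label_dimension=1)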
class _DistributionRegressionHead(_RegressionHead):
"""Creates a _RegressionHead instance from an arbitrary `Distribution`."""
def __init__(self,
make_distribution_fn,
label_dimension,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""`Head` for regression.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the
size of the last dimension of the labels `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the
last dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None`
if label is a tensor (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure
learns the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and
metrics keys are suffixed by `"/" + head_name` and the default variable
scope is `head_name`.
Raises:
TypeError: if `make_distribution_fn` is not `callable`.
"""
if not callable(make_distribution_fn):
raise TypeError("`make_distribution_fn` must be a callable function.")
self._distributions = {}
self._make_distribution_fn = make_distribution_fn
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
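    # e.g. concat_vectors([1, 2], [3]) yields [1, 2, 3] when both inputs are
    # statically known, and falls back to a dynamic array_ops.concat otherwise.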
def loss_fn(labels, logits, weights=None):
"""Returns the loss of using `logits` to predict `labels`."""
d = self.distribution(logits)
labels_batch_shape = labels.shape.with_rank_at_least(1)[:-1]
labels_batch_shape = (
labels_batch_shape.as_list() if labels_batch_shape.is_fully_defined()
else array_ops.shape(labels)[:-1])
labels = array_ops.reshape(
labels,
shape=concat_vectors(labels_batch_shape, d.event_shape_tensor()))
return _compute_weighted_loss(
loss_unweighted=-d.log_prob(labels),
weight=weights)
def link_fn(logits):
"""Returns the inverse link function at `logits`."""
# Note: What the API calls a "link function" is really the inverse-link
# function, i.e., the "mean".
d = self.distribution(logits)
return d.mean()
super(_DistributionRegressionHead, self).__init__(
label_dimension=label_dimension,
loss_fn=loss_fn,
link_fn=link_fn,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
@property
def distributions(self):
"""Returns all distributions created by `DistributionRegressionHead`."""
return self._distributions
def distribution(self, logits, name=None):
"""Retrieves a distribution instance, parameterized by `logits`.
Args:
logits: `float`-like `Tensor` representing the parameters of the
underlying distribution.
name: The Python `str` name to given to this op.
Default value: "distribution".
Returns:
distribution: `tf.Distribution` instance parameterized by `logits`.
"""
with ops.name_scope(name, "distribution", [logits]):
d = self._distributions.get(logits, None)
if d is None:
d = self._make_distribution_fn(logits)
self._distributions[logits] = d
return d
| allenlavoie/tensorflow | tensorflow/contrib/distributions/python/ops/estimator.py | Python | apache-2.0 | 7,908 | 0.004299 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
from hashlib import md5
# This must be run before importing Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
from elasticsearch import Elasticsearch, helpers
from translate.storage import factory
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import dateparse
from django.utils.encoding import force_bytes
from pootle.core.utils import dateformat
from pootle_store.models import Unit
from pootle_translationproject.models import TranslationProject
BULK_CHUNK_SIZE = 5000
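# The parsers below yield plain dicts shaped as Elasticsearch bulk actions
# ('_index', '_type', '_id' plus the document fields). A minimal sketch of how
# such actions are typically fed to the already-imported bulk helper (the host
# settings here are illustrative, not taken from this project):
#
#   es = Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}])
#   helpers.bulk(es, actions, chunk_size=BULK_CHUNK_SIZE)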
class BaseParser(object):
def __init__(self, *args, **kwargs):
"""Initialize the parser."""
self.stdout = kwargs.pop('stdout')
self.INDEX_NAME = kwargs.pop('index', None)
    def get_units(self):
        """Gets the units to import and their total count."""
raise NotImplementedError
def get_unit_data(self, unit):
"""Return dict with data to import for a single unit."""
raise NotImplementedError
class DBParser(BaseParser):
def __init__(self, *args, **kwargs):
super(DBParser, self).__init__(*args, **kwargs)
self.exclude_disabled_projects = not kwargs.pop('disabled_projects')
self.tp_pk = None
    def get_units(self):
        """Gets the units to import and their total count."""
units_qs = (
Unit.objects.exclude(target_f__isnull=True)
.exclude(target_f__exact='')
.filter(store__translation_project__pk=self.tp_pk)
.filter(revision__gt=self.last_indexed_revision))
units_qs = units_qs.select_related(
'change__submitted_by',
'store',
'store__translation_project__project',
'store__translation_project__language')
if self.exclude_disabled_projects:
units_qs = units_qs.exclude(
store__translation_project__project__disabled=True
).exclude(store__obsolete=True)
units_qs = units_qs.values(
'id',
'revision',
'source_f',
'target_f',
'change__submitted_on',
'change__submitted_by__username',
'change__submitted_by__full_name',
'change__submitted_by__email',
'store__translation_project__project__fullname',
'store__pootle_path',
'store__translation_project__language__code'
).order_by()
return units_qs.iterator(), units_qs.count()
def get_unit_data(self, unit):
"""Return dict with data to import for a single unit."""
fullname = (unit['change__submitted_by__full_name'] or
unit['change__submitted_by__username'])
email_md5 = None
if unit['change__submitted_by__email']:
email_md5 = md5(
force_bytes(unit['change__submitted_by__email'])).hexdigest()
iso_submitted_on = unit.get('change__submitted_on', None)
display_submitted_on = None
if iso_submitted_on:
display_submitted_on = dateformat.format(
dateparse.parse_datetime(str(iso_submitted_on))
)
return {
'_index': self.INDEX_NAME,
'_type': unit['store__translation_project__language__code'],
'_id': unit['id'],
'revision': int(unit['revision']),
'project': unit['store__translation_project__project__fullname'],
'path': unit['store__pootle_path'],
'username': unit['change__submitted_by__username'],
'fullname': fullname,
'email_md5': email_md5,
'source': unit['source_f'],
'target': unit['target_f'],
'iso_submitted_on': iso_submitted_on,
'display_submitted_on': display_submitted_on,
}
class FileParser(BaseParser):
def __init__(self, *args, **kwargs):
super(FileParser, self).__init__(*args, **kwargs)
self.target_language = kwargs.pop('language', None)
self.project = kwargs.pop('project', None)
self.filenames = kwargs.pop('filenames')
def get_units(self):
"""Gets the units to import and its total count."""
units = []
all_filenames = set()
for filename in self.filenames:
if not os.path.exists(filename):
self.stdout.write("File %s doesn't exist. Skipping it." %
filename)
continue
if os.path.isdir(filename):
for dirpath, dirs_, fnames in os.walk(filename):
if (os.path.basename(dirpath) in
["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"]):
continue
for f in fnames:
all_filenames.add(os.path.join(dirpath, f))
else:
all_filenames.add(filename)
for filename in all_filenames:
store = factory.getobject(filename)
if not store.gettargetlanguage() and not self.target_language:
raise CommandError("Unable to determine target language for "
"'%s'. Try again specifying a fallback "
"target language with --target-language" %
filename)
self.filename = filename
units.extend([unit for unit in store.units if unit.istranslated()])
return units, len(units)
def get_unit_data(self, unit):
"""Return dict with data to import for a single unit."""
target_language = unit.gettargetlanguage()
if target_language is None:
target_language = self.target_language
return {
'_index': self.INDEX_NAME,
'_type': target_language,
'_id': unit.getid(),
'revision': 0,
'project': self.project,
'path': self.filename,
'username': None,
'fullname': None,
'email_md5': None,
'source': unit.source,
'target': unit.target,
'iso_submitted_on': None,
'display_submitted_on': None,
}
class Command(BaseCommand):
help = "Load Translation Memory with translations"
def add_arguments(self, parser):
parser.add_argument(
'--refresh',
action='store_true',
dest='refresh',
default=False,
help='Process all items, not just the new ones, so '
'existing translations are refreshed'
)
parser.add_argument(
'--rebuild',
action='store_true',
dest='rebuild',
default=False,
help='Drop the entire TM on start and update everything '
'from scratch'
)
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Report the number of translations to index and quit'
)
# Local TM specific options.
local = parser.add_argument_group('Local TM', 'Pootle Local '
'Translation Memory')
local.add_argument(
'--include-disabled-projects',
action='store_true',
dest='disabled_projects',
default=False,
help='Add translations from disabled projects'
)
# External TM specific options.
external = parser.add_argument_group('External TM', 'Pootle External '
'Translation Memory')
external.add_argument(
nargs='*',
dest='files',
help='Translation memory files',
)
external.add_argument(
'--tm',
action='store',
dest='tm',
default='local',
help="TM to use. TM must exist on settings. TM will be "
"created on the server if it doesn't exist"
)
external.add_argument(
'--target-language',
action='store',
dest='target_language',
default='',
help="Target language to fallback to use in case it can't "
"be guessed for any of the input files."
)
external.add_argument(
'--display-name',
action='store',
dest='project',
default='',
help='Name used when displaying TM matches for these '
'translations.'
)
def _parse_translations(self, **options):
units, total = self.parser.get_units()
if total == 0:
self.stdout.write("No translations to index")
return
self.stdout.write("%s translations to index" % total)
if options['dry_run']:
return
self.stdout.write("")
i = 0
for i, unit in enumerate(units, start=1):
if (i % 1000 == 0) or (i == total):
percent = "%.1f" % (i * 100.0 / total)
self.stdout.write("%s (%s%%)" % (i, percent), ending='\r')
self.stdout.flush()
yield self.parser.get_unit_data(unit)
if i != total:
self.stdout.write("Expected %d, loaded %d." % (total, i))
def _initialize(self, **options):
if not settings.POOTLE_TM_SERVER:
raise CommandError('POOTLE_TM_SERVER setting is missing.')
try:
self.tm_settings = settings.POOTLE_TM_SERVER[options['tm']]
except KeyError:
raise CommandError("Translation Memory '%s' is not defined in the "
"POOTLE_TM_SERVER setting. Please ensure it "
"exists and double-check you typed it "
"correctly." % options['tm'])
self.INDEX_NAME = self.tm_settings['INDEX_NAME']
self.is_local_tm = options['tm'] == 'local'
self.es = Elasticsearch([
{
'host': self.tm_settings['HOST'],
'port': self.tm_settings['PORT'],
}], retry_on_timeout=True
)
# If files to import have been provided.
if options['files']:
if self.is_local_tm:
raise CommandError('You cannot add translations from files to '
'a local TM.')
if not options['project']:
raise CommandError('You must specify a project name with '
'--display-name.')
self.parser = FileParser(stdout=self.stdout, index=self.INDEX_NAME,
filenames=options['files'],
language=options['target_language'],
project=options['project'])
elif not self.is_local_tm:
raise CommandError('You cannot add translations from database to '
'an external TM.')
else:
self.parser = DBParser(
stdout=self.stdout, index=self.INDEX_NAME,
disabled_projects=options['disabled_projects'])
def _set_latest_indexed_revision(self, **options):
self.last_indexed_revision = -1
if (not options['rebuild'] and
not options['refresh'] and
self.es.indices.exists(self.INDEX_NAME)):
result = self.es.search(
index=self.INDEX_NAME,
body={
'aggs': {
'max_revision': {
'max': {
'field': 'revision'
}
}
}
}
)
self.last_indexed_revision = \
result['aggregations']['max_revision']['value'] or -1
self.parser.last_indexed_revision = self.last_indexed_revision
self.stdout.write("Last indexed revision = %s" %
self.last_indexed_revision)
def handle(self, **options):
self._initialize(**options)
if (options['rebuild'] and
not options['dry_run'] and
self.es.indices.exists(self.INDEX_NAME)):
self.es.indices.delete(index=self.INDEX_NAME)
if (not options['dry_run'] and
not self.es.indices.exists(self.INDEX_NAME)):
self.es.indices.create(index=self.INDEX_NAME)
if self.is_local_tm:
self._set_latest_indexed_revision(**options)
if isinstance(self.parser, FileParser):
helpers.bulk(self.es, self._parse_translations(**options))
return
# If we are parsing from DB.
tp_qs = TranslationProject.objects.all()
if options['disabled_projects']:
tp_qs = tp_qs.exclude(project__disabled=True)
for tp in tp_qs:
self.parser.tp_pk = tp.pk
helpers.bulk(self.es, self._parse_translations(**options))
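# Hedged usage sketch (added, not part of the original command).  Based on the
# arguments defined above, typical invocations would look like:
#   python manage.py update_tmserver                      # index new local translations
#   python manage.py update_tmserver --rebuild            # drop the index and reindex everything
#   python manage.py update_tmserver --tm=external --display-name="Legacy PO" foo.po translations/
# The executable ('python manage.py' vs. 'pootle'), the TM name 'external' and
# the file names are assumptions; the TM must exist in settings.POOTLE_TM_SERVER.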
| unho/pootle | pootle/apps/pootle_app/management/commands/update_tmserver.py | Python | gpl-3.0 | 13,500 | 0.000963 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew_tap
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
short_description: Tap a Homebrew repository.
description:
- Tap external Homebrew repositories.
version_added: "1.6"
options:
name:
description:
- The GitHub user/organization repository to tap.
required: true
aliases: ['tap']
url:
description:
- The optional git URL of the repository to tap. The URL is not
assumed to be on GitHub, and the protocol doesn't have to be HTTP.
Any location and protocol that git can handle is fine.
- I(name) option may not be a list of multiple taps (but a single
tap instead) when this option is provided.
required: false
version_added: "2.2"
state:
description:
- state of the repository.
choices: [ 'present', 'absent' ]
required: false
default: 'present'
requirements: [ homebrew ]
'''
EXAMPLES = '''
- homebrew_tap:
name: homebrew/dupes
- homebrew_tap:
name: homebrew/dupes
state: absent
- homebrew_tap:
name: homebrew/dupes,homebrew/science
state: present
- homebrew_tap:
name: telemachus/brew
url: 'https://bitbucket.org/telemachus/brew'
'''
import re
def a_valid_tap(tap):
'''Returns True if the tap is valid.'''
regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
return regex.match(tap)
def already_tapped(module, brew_path, tap):
'''Returns True if already tapped.'''
rc, out, err = module.run_command([
brew_path,
'tap',
])
taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
tap_name = re.sub('homebrew-', '', tap.lower())
return tap_name in taps
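# Added note (illustrative): already_tapped() normalises the requested tap
# before comparing it with `brew tap` output, e.g.
#   re.sub('homebrew-', '', 'Telemachus/homebrew-brew'.lower())  ->  'telemachus/brew'
# so 'telemachus/brew' and 'telemachus/homebrew-brew' count as the same tap.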
def add_tap(module, brew_path, tap, url=None):
'''Adds a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif not already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
        # Build the command explicitly so that a missing URL is not passed to
        # `brew tap` as an extra argument.
        cmd = [brew_path, 'tap', tap]
        if url is not None:
            cmd.append(url)
        rc, out, err = module.run_command(cmd)
if already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully tapped: %s' % tap
else:
failed = True
msg = 'failed to tap: %s' % tap
else:
msg = 'already tapped: %s' % tap
return (failed, changed, msg)
def add_taps(module, brew_path, taps):
'''Adds one or more taps.'''
    # Initialise `changed` as well so the final return is well-defined even when `taps` is empty.
    failed, changed, unchanged, added, msg = False, False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = add_tap(module, brew_path, tap)
if failed:
break
if changed:
added += 1
else:
unchanged += 1
if failed:
msg = 'added: %d, unchanged: %d, error: ' + msg
msg = msg % (added, unchanged)
elif added:
changed = True
msg = 'added: %d, unchanged: %d' % (added, unchanged)
else:
msg = 'added: %d, unchanged: %d' % (added, unchanged)
return (failed, changed, msg)
def remove_tap(module, brew_path, tap):
'''Removes a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'untap',
tap,
])
if not already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully untapped: %s' % tap
else:
failed = True
msg = 'failed to untap: %s' % tap
else:
msg = 'already untapped: %s' % tap
return (failed, changed, msg)
def remove_taps(module, brew_path, taps):
'''Removes one or more taps.'''
    # Initialise `changed` as well so the final return is well-defined even when `taps` is empty.
    failed, changed, unchanged, removed, msg = False, False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = remove_tap(module, brew_path, tap)
if failed:
break
if changed:
removed += 1
else:
unchanged += 1
if failed:
msg = 'removed: %d, unchanged: %d, error: ' + msg
msg = msg % (removed, unchanged)
elif removed:
changed = True
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
else:
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
return (failed, changed, msg)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['tap'], type='list', required=True),
url=dict(default=None, required=False),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
brew_path = module.get_bin_path(
'brew',
required=True,
opt_dirs=['/usr/local/bin']
)
taps = module.params['name']
url = module.params['url']
if module.params['state'] == 'present':
if url is None:
# No tap URL provided explicitly, continue with bulk addition
# of all the taps.
failed, changed, msg = add_taps(module, brew_path, taps)
else:
            # When a tap URL is provided explicitly, we allow adding a
            # *single* tap only. Validate and proceed to add the single tap.
            if len(taps) > 1:
                msg = "List of multiple taps may not be provided with 'url' option."
module.fail_json(msg=msg)
else:
failed, changed, msg = add_tap(module, brew_path, taps[0], url)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
elif module.params['state'] == 'absent':
failed, changed, msg = remove_taps(module, brew_path, taps)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| andreaso/ansible | lib/ansible/modules/packaging/os/homebrew_tap.py | Python | gpl-3.0 | 7,344 | 0.000681 |
import random
import requests
import shutil
import logging
import os
import traceback
import ujson
from typing import List, Dict, Any, Optional, Set, Callable, Iterable, Tuple, TypeVar
from django.forms.models import model_to_dict
from zerver.models import Realm, RealmEmoji, Subscription, Recipient, \
Attachment, Stream, Message, UserProfile
from zerver.data_import.sequencer import NEXT_ID
from zerver.lib.actions import STREAM_ASSIGNMENT_COLORS as stream_colors
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.parallel import run_parallel, JobData
# stubs
ZerverFieldsT = Dict[str, Any]
def build_zerver_realm(realm_id: int, realm_subdomain: str, time: float,
other_product: str) -> List[ZerverFieldsT]:
realm = Realm(id=realm_id, date_created=time,
name=realm_subdomain, string_id=realm_subdomain,
description=("Organization imported from %s!" % (other_product)))
auth_methods = [[flag[0], flag[1]] for flag in realm.authentication_methods]
realm_dict = model_to_dict(realm, exclude='authentication_methods')
realm_dict['authentication_methods'] = auth_methods
    return [realm_dict]
def build_user_profile(avatar_source: str,
date_joined: Any,
delivery_email: str,
email: str,
full_name: str,
id: int,
is_active: bool,
is_realm_admin: bool,
is_guest: bool,
is_mirror_dummy: bool,
realm_id: int,
short_name: str,
timezone: Optional[str]) -> ZerverFieldsT:
pointer = -1
obj = UserProfile(
avatar_source=avatar_source,
date_joined=date_joined,
delivery_email=delivery_email,
email=email,
full_name=full_name,
id=id,
is_active=is_active,
is_realm_admin=is_realm_admin,
is_guest=is_guest,
pointer=pointer,
realm_id=realm_id,
short_name=short_name,
timezone=timezone,
)
dct = model_to_dict(obj)
return dct
def build_avatar(zulip_user_id: int, realm_id: int, email: str, avatar_url: str,
timestamp: Any, avatar_list: List[ZerverFieldsT]) -> None:
avatar = dict(
path=avatar_url, # Save original avatar url here, which is downloaded later
realm_id=realm_id,
content_type=None,
user_profile_id=zulip_user_id,
last_modified=timestamp,
user_profile_email=email,
s3_path="",
size="")
avatar_list.append(avatar)
def make_subscriber_map(zerver_subscription: List[ZerverFieldsT]) -> Dict[int, Set[int]]:
'''
This can be convenient for building up UserMessage
rows.
'''
subscriber_map = dict() # type: Dict[int, Set[int]]
for sub in zerver_subscription:
user_id = sub['user_profile']
recipient_id = sub['recipient']
if recipient_id not in subscriber_map:
subscriber_map[recipient_id] = set()
subscriber_map[recipient_id].add(user_id)
return subscriber_map
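# Added illustrative helper (not part of the original import pipeline): shows
# the mapping shape make_subscriber_map() produces for two hypothetical
# subscriptions that point at the same recipient id.
def _example_subscriber_map() -> Dict[int, Set[int]]:
    example_subscriptions = [
        dict(user_profile=1, recipient=10),
        dict(user_profile=2, recipient=10),
    ]
    return make_subscriber_map(example_subscriptions)  # -> {10: {1, 2}}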
def build_subscription(recipient_id: int, user_id: int,
subscription_id: int) -> ZerverFieldsT:
subscription = Subscription(
color=random.choice(stream_colors),
id=subscription_id)
subscription_dict = model_to_dict(subscription, exclude=['user_profile', 'recipient_id'])
subscription_dict['user_profile'] = user_id
subscription_dict['recipient'] = recipient_id
return subscription_dict
def build_public_stream_subscriptions(
zerver_userprofile: List[ZerverFieldsT],
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
'''
This function is only used for Hipchat now, but it may apply to
future conversions. We often don't get full subscriber data in
the Hipchat export, so this function just autosubscribes all
users to every public stream. This returns a list of Subscription
dicts.
'''
subscriptions = [] # type: List[ZerverFieldsT]
public_stream_ids = {
stream['id']
for stream in zerver_stream
if not stream['invite_only']
}
public_stream_recipient_ids = {
recipient['id']
for recipient in zerver_recipient
if recipient['type'] == Recipient.STREAM
and recipient['type_id'] in public_stream_ids
}
user_ids = [
user['id']
for user in zerver_userprofile
]
for recipient_id in public_stream_recipient_ids:
for user_id in user_ids:
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_private_stream_subscriptions(
get_users: Callable[..., Set[int]],
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions = [] # type: List[ZerverFieldsT]
stream_ids = {
stream['id']
for stream in zerver_stream
if stream['invite_only']
}
recipient_map = {
recipient['id']: recipient['type_id'] # recipient_id -> stream_id
for recipient in zerver_recipient
if recipient['type'] == Recipient.STREAM
and recipient['type_id'] in stream_ids
}
for recipient_id, stream_id in recipient_map.items():
user_ids = get_users(stream_id=stream_id)
for user_id in user_ids:
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_personal_subscriptions(zerver_recipient: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions = [] # type: List[ZerverFieldsT]
personal_recipients = [
recipient
for recipient in zerver_recipient
if recipient['type'] == Recipient.PERSONAL
]
for recipient in personal_recipients:
recipient_id = recipient['id']
user_id = recipient['type_id']
subscription = build_subscription(
recipient_id=recipient_id,
user_id=user_id,
subscription_id=NEXT_ID('subscription'),
)
subscriptions.append(subscription)
return subscriptions
def build_recipient(type_id: int, recipient_id: int, type: int) -> ZerverFieldsT:
recipient = Recipient(
type_id=type_id, # stream id
id=recipient_id,
type=type)
recipient_dict = model_to_dict(recipient)
return recipient_dict
def build_recipients(zerver_userprofile: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
'''
As of this writing, we only use this in the HipChat
conversion. The Slack and Gitter conversions do it more
tightly integrated with creating other objects.
'''
recipients = []
for user in zerver_userprofile:
type_id = user['id']
type = Recipient.PERSONAL
recipient = Recipient(
type_id=type_id,
id=NEXT_ID('recipient'),
type=type,
)
recipient_dict = model_to_dict(recipient)
recipients.append(recipient_dict)
for stream in zerver_stream:
type_id = stream['id']
type = Recipient.STREAM
recipient = Recipient(
type_id=type_id,
id=NEXT_ID('recipient'),
type=type,
)
recipient_dict = model_to_dict(recipient)
recipients.append(recipient_dict)
return recipients
def build_realm(zerver_realm: List[ZerverFieldsT], realm_id: int,
domain_name: str) -> ZerverFieldsT:
realm = dict(zerver_client=[{"name": "populate_db", "id": 1},
{"name": "website", "id": 2},
{"name": "API", "id": 3}],
zerver_customprofilefield=[],
zerver_customprofilefieldvalue=[],
zerver_userpresence=[], # shows last logged in data, which is not available
zerver_userprofile_mirrordummy=[],
zerver_realmdomain=[{"realm": realm_id,
"allow_subdomains": False,
"domain": domain_name,
"id": realm_id}],
zerver_useractivity=[],
zerver_realm=zerver_realm,
zerver_huddle=[],
zerver_userprofile_crossrealm=[],
zerver_useractivityinterval=[],
zerver_reaction=[],
zerver_realmemoji=[],
zerver_realmfilter=[])
return realm
def build_usermessages(zerver_usermessage: List[ZerverFieldsT],
subscriber_map: Dict[int, Set[int]],
recipient_id: int,
mentioned_user_ids: List[int],
message_id: int) -> None:
user_ids = subscriber_map.get(recipient_id, set())
if user_ids:
for user_id in sorted(user_ids):
is_mentioned = user_id in mentioned_user_ids
# Slack and Gitter don't yet triage private messages.
# It's possible we don't even get PMs from them.
is_private = False
usermessage = build_user_message(
user_id=user_id,
message_id=message_id,
is_private=is_private,
is_mentioned=is_mentioned,
)
zerver_usermessage.append(usermessage)
def build_user_message(user_id: int,
message_id: int,
is_private: bool,
is_mentioned: bool) -> ZerverFieldsT:
flags_mask = 1 # For read
if is_mentioned:
flags_mask += 8 # For mentioned
if is_private:
flags_mask += 2048 # For is_private
id = NEXT_ID('user_message')
usermessage = dict(
id=id,
user_profile=user_id,
message=message_id,
flags_mask=flags_mask,
)
return usermessage
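# Worked example (added): a usermessage built with is_mentioned=True and
# is_private=True gets flags_mask = 1 (read) + 8 (mentioned) + 2048 (is_private)
# = 2057; with both flags False it stays at 1.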
def build_defaultstream(realm_id: int, stream_id: int,
defaultstream_id: int) -> ZerverFieldsT:
defaultstream = dict(
stream=stream_id,
realm=realm_id,
id=defaultstream_id)
return defaultstream
def build_stream(date_created: Any, realm_id: int, name: str,
description: str, stream_id: int, deactivated: bool=False,
invite_only: bool=False) -> ZerverFieldsT:
stream = Stream(
name=name,
deactivated=deactivated,
description=description,
date_created=date_created,
invite_only=invite_only,
id=stream_id)
stream_dict = model_to_dict(stream,
exclude=['realm'])
stream_dict['realm'] = realm_id
return stream_dict
def build_message(topic_name: str, pub_date: float, message_id: int, content: str,
rendered_content: Optional[str], user_id: int, recipient_id: int,
has_image: bool=False, has_link: bool=False,
has_attachment: bool=True) -> ZerverFieldsT:
zulip_message = Message(
rendered_content_version=1, # this is Zulip specific
pub_date=pub_date,
id=message_id,
content=content,
rendered_content=rendered_content,
has_image=has_image,
has_attachment=has_attachment,
has_link=has_link)
zulip_message.set_topic_name(topic_name)
zulip_message_dict = model_to_dict(zulip_message,
exclude=['recipient', 'sender', 'sending_client'])
zulip_message_dict['sender'] = user_id
zulip_message_dict['sending_client'] = 1
zulip_message_dict['recipient'] = recipient_id
return zulip_message_dict
def build_attachment(realm_id: int, message_ids: Set[int],
user_id: int, fileinfo: ZerverFieldsT, s3_path: str,
zerver_attachment: List[ZerverFieldsT]) -> None:
"""
This function should be passed a 'fileinfo' dictionary, which contains
    information about 'size', 'created' (creation time) and 'name' (filename).
"""
attachment_id = NEXT_ID('attachment')
attachment = Attachment(
id=attachment_id,
size=fileinfo['size'],
create_time=fileinfo['created'],
is_realm_public=True,
path_id=s3_path,
file_name=fileinfo['name'])
attachment_dict = model_to_dict(attachment,
exclude=['owner', 'messages', 'realm'])
attachment_dict['owner'] = user_id
attachment_dict['messages'] = list(message_ids)
attachment_dict['realm'] = realm_id
zerver_attachment.append(attachment_dict)
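# Illustrative fileinfo dict (added, hypothetical values):
#   fileinfo = dict(name='screenshot.png', size=2048, created=1540000000.0)
# which fills Attachment.file_name, .size and .create_time above.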
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,
threads: int, size_url_suffix: str='') -> List[ZerverFieldsT]:
"""
This function gets the avatar of the user and saves it in the
user's avatar directory with both the extensions '.png' and '.original'
Required parameters:
1. avatar_list: List of avatars to be mapped in avatars records.json file
2. avatar_dir: Folder where the downloaded avatars are saved
3. realm_id: Realm ID.
We use this for Slack and Gitter conversions, where avatars need to be
downloaded. For simpler conversions see write_avatar_png.
"""
def get_avatar(avatar_upload_item: List[str]) -> None:
avatar_url = avatar_upload_item[0]
image_path = os.path.join(avatar_dir, avatar_upload_item[1])
original_image_path = os.path.join(avatar_dir, avatar_upload_item[2])
response = requests.get(avatar_url + size_url_suffix, stream=True)
with open(image_path, 'wb') as image_file:
shutil.copyfileobj(response.raw, image_file)
shutil.copy(image_path, original_image_path)
logging.info('######### GETTING AVATARS #########\n')
logging.info('DOWNLOADING AVATARS .......\n')
avatar_original_list = []
avatar_upload_list = []
for avatar in avatar_list:
avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
avatar_url = avatar['path']
avatar_original = dict(avatar)
image_path = ('%s.png' % (avatar_hash))
original_image_path = ('%s.original' % (avatar_hash))
avatar_upload_list.append([avatar_url, image_path, original_image_path])
# We don't add the size field here in avatar's records.json,
# since the metadata is not needed on the import end, and we
# don't have it until we've downloaded the files anyway.
avatar['path'] = image_path
avatar['s3_path'] = image_path
avatar_original['path'] = original_image_path
avatar_original['s3_path'] = original_image_path
avatar_original_list.append(avatar_original)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel_wrapper(get_avatar, avatar_upload_list, threads=threads):
output.append(job)
logging.info('######### GETTING AVATARS FINISHED #########\n')
return avatar_list + avatar_original_list
def write_avatar_png(avatar_folder: str,
realm_id: int,
user_id: int,
bits: bytes) -> ZerverFieldsT:
'''
Use this function for conversions like Hipchat where
the bits for the .png file come in something like
a users.json file, and where we don't have to
fetch avatar images externally.
'''
avatar_hash = user_avatar_path_from_ids(
user_profile_id=user_id,
realm_id=realm_id,
)
image_fn = avatar_hash + '.original'
image_path = os.path.join(avatar_folder, image_fn)
with open(image_path, 'wb') as image_file:
image_file.write(bits)
# Return metadata that eventually goes in records.json.
metadata = dict(
path=image_path,
s3_path=image_path,
realm_id=realm_id,
user_profile_id=user_id,
)
return metadata
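# Hedged usage sketch (added): a converter that already holds raw PNG bytes can
# write them out and collect the returned metadata for records.json, e.g.
#   record = write_avatar_png(avatar_folder=avatar_dir, realm_id=realm_id,
#                             user_id=user['id'], bits=png_bytes)
#   avatar_records.append(record)
# The variable names above are hypothetical.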
ListJobData = TypeVar('ListJobData')
def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[ListJobData],
threads: int=6) -> Iterable[Tuple[int, List[ListJobData]]]:
logging.info("Distributing %s items across %s threads" % (len(full_items), threads))
def wrapping_function(items: List[ListJobData]) -> int:
count = 0
for item in items:
try:
f(item)
except Exception:
logging.info("Error processing item: %s" % (item,))
traceback.print_exc()
count += 1
if count % 1000 == 0:
logging.info("A download thread finished %s items" % (count,))
return 0
job_lists = [full_items[i::threads] for i in range(threads)] # type: List[List[ListJobData]]
return run_parallel(wrapping_function, job_lists, threads=threads)
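# Worked example (added): with 7 items and threads=3 the striped slicing
# full_items[i::threads] produces [item0, item3, item6], [item1, item4] and
# [item2, item5] -- a round-robin split rather than contiguous chunks.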
def process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,
threads: int) -> List[ZerverFieldsT]:
"""
    This function downloads the uploads and saves them in the realm's upload directory.
Required parameters:
1. upload_list: List of uploads to be mapped in uploads records.json file
2. upload_dir: Folder where the downloaded uploads are saved
"""
def get_uploads(upload: List[str]) -> None:
upload_url = upload[0]
upload_path = upload[1]
upload_path = os.path.join(upload_dir, upload_path)
response = requests.get(upload_url, stream=True)
os.makedirs(os.path.dirname(upload_path), exist_ok=True)
with open(upload_path, 'wb') as upload_file:
shutil.copyfileobj(response.raw, upload_file)
logging.info('######### GETTING ATTACHMENTS #########\n')
logging.info('DOWNLOADING ATTACHMENTS .......\n')
upload_url_list = []
for upload in upload_list:
upload_url = upload['path']
upload_s3_path = upload['s3_path']
upload_url_list.append([upload_url, upload_s3_path])
upload['path'] = upload_s3_path
    # Run downloads in parallel
output = []
for (status, job) in run_parallel_wrapper(get_uploads, upload_url_list, threads=threads):
output.append(job)
logging.info('######### GETTING ATTACHMENTS FINISHED #########\n')
return upload_list
def build_realm_emoji(realm_id: int,
name: str,
id: int,
file_name: str) -> ZerverFieldsT:
return model_to_dict(
RealmEmoji(
realm_id=realm_id,
name=name,
id=id,
file_name=file_name,
)
)
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:
"""
    This function downloads the custom emojis and saves them in the output emoji folder.
Required parameters:
1. zerver_realmemoji: List of all RealmEmoji objects to be imported
2. emoji_dir: Folder where the downloaded emojis are saved
3. emoji_url_map: Maps emoji name to its url
"""
def get_emojis(upload: List[str]) -> None:
emoji_url = upload[0]
emoji_path = upload[1]
upload_emoji_path = os.path.join(emoji_dir, emoji_path)
response = requests.get(emoji_url, stream=True)
os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
with open(upload_emoji_path, 'wb') as emoji_file:
shutil.copyfileobj(response.raw, emoji_file)
emoji_records = []
upload_emoji_list = []
logging.info('######### GETTING EMOJIS #########\n')
logging.info('DOWNLOADING EMOJIS .......\n')
for emoji in zerver_realmemoji:
emoji_url = emoji_url_map[emoji['name']]
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=emoji['realm'],
emoji_file_name=emoji['name'])
upload_emoji_list.append([emoji_url, emoji_path])
emoji_record = dict(emoji)
emoji_record['path'] = emoji_path
emoji_record['s3_path'] = emoji_path
emoji_record['realm_id'] = emoji_record['realm']
emoji_record.pop('realm')
emoji_records.append(emoji_record)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel_wrapper(get_emojis, upload_emoji_list, threads=threads):
output.append(job)
logging.info('######### GETTING EMOJIS FINISHED #########\n')
return emoji_records
def create_converted_data_files(data: Any, output_dir: str, file_path: str) -> None:
output_file = output_dir + file_path
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, 'w') as fp:
ujson.dump(data, fp, indent=4)
| jackrzhang/zulip | zerver/data_import/import_util.py | Python | apache-2.0 | 21,272 | 0.003197 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import net_utils
from tempest import config
from tempest.lib import decorators
from tempest import test
CONF = config.CONF
class FloatingIPTestJSON(base.BaseNetworkTest):
"""Tests the following operations in the Neutron API:
Create a Floating IP
Update a Floating IP
Delete a Floating IP
List all Floating IPs
Show Floating IP details
Associate a Floating IP with a port and then delete that port
Associate a Floating IP with a port and then with a port on another
router
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
public_network_id which is the id for the external network present
"""
@classmethod
def skip_checks(cls):
super(FloatingIPTestJSON, cls).skip_checks()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
if not CONF.network.public_network_id:
msg = "The public_network_id option must be specified."
raise cls.skipException(msg)
if not CONF.network_feature_enabled.floating_ips:
raise cls.skipException("Floating ips are not available")
@classmethod
def resource_setup(cls):
super(FloatingIPTestJSON, cls).resource_setup()
cls.ext_net_id = CONF.network.public_network_id
# Create network, subnet, router and add interface
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network, enable_dhcp=False)
cls.router = cls.create_router(external_network_id=cls.ext_net_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
# Create two ports one each for Creation and Updating of floatingIP
for i in range(2):
cls.create_port(cls.network)
@decorators.attr(type='smoke')
@decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e8718')
def test_create_list_show_update_delete_floating_ip(self):
# Creates a floating IP
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[0]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
self.assertEqual(created_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertIn(created_floating_ip['fixed_ip_address'],
[ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
# Verifies the details of a floating_ip
floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertEqual(shown_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertEqual(shown_floating_ip['tenant_id'],
created_floating_ip['tenant_id'])
self.assertEqual(shown_floating_ip['floating_ip_address'],
created_floating_ip['floating_ip_address'])
self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])
# Verify the floating ip exists in the list of all floating_ips
floating_ips = self.floating_ips_client.list_floatingips()
floatingip_id_list = list()
for f in floating_ips['floatingips']:
floatingip_id_list.append(f['id'])
self.assertIn(created_floating_ip['id'], floatingip_id_list)
# Associate floating IP to the other port
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=self.ports[1]['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['port_id'], self.ports[1]['id'])
self.assertEqual(updated_floating_ip['fixed_ip_address'],
self.ports[1]['fixed_ips'][0]['ip_address'])
self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
# Disassociate floating IP from the port
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=None)
updated_floating_ip = floating_ip['floatingip']
self.assertIsNone(updated_floating_ip['port_id'])
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
self.assertIsNone(updated_floating_ip['router_id'])
@decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
def test_floating_ip_delete_port(self):
# Create a floating IP
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id)
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
# Create a port
port = self.ports_client.create_port(network_id=self.network['id'])
created_port = port['port']
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=created_port['id'])
# Delete port
self.ports_client.delete_port(created_port['id'])
# Verifies the details of the floating_ip
floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
# Confirm the fields are back to None
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertIsNone(shown_floating_ip['port_id'])
self.assertIsNone(shown_floating_ip['fixed_ip_address'])
self.assertIsNone(shown_floating_ip['router_id'])
@decorators.idempotent_id('1bb2f731-fe5a-4b8c-8409-799ade1bed4d')
def test_floating_ip_update_different_router(self):
# Associate a floating IP to a port on a router
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[1]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
router2 = self.create_router(external_network_id=self.ext_net_id)
self.create_router_interface(router2['id'], subnet2['id'])
port_other_router = self.create_port(network2)
# Associate floating IP to the other port on another router
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=port_other_router['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['router_id'], router2['id'])
self.assertEqual(updated_floating_ip['port_id'],
port_other_router['id'])
self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5')
def test_create_floating_ip_specifying_a_fixed_ip_address(self):
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[1]['id'],
fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertEqual(created_floating_ip['fixed_ip_address'],
self.ports[1]['fixed_ips'][0]['ip_address'])
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=None)
self.assertIsNone(floating_ip['floatingip']['port_id'])
@decorators.idempotent_id('45c4c683-ea97-41ef-9c51-5e9802f2f3d7')
def test_create_update_floatingip_with_port_multiple_ip_address(self):
# Find out ips that can be used for tests
list_ips = net_utils.get_unused_ip_addresses(
self.ports_client,
self.subnets_client,
self.subnet['network_id'],
self.subnet['id'],
2)
fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
# Create port
body = self.ports_client.create_port(network_id=self.network['id'],
fixed_ips=fixed_ips)
port = body['port']
self.addCleanup(self.ports_client.delete_port, port['id'])
# Create floating ip
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port['id'],
fixed_ip_address=list_ips[0])
floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
floating_ip['id'])
self.assertIsNotNone(floating_ip['id'])
self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
# Update floating ip
body = self.floating_ips_client.update_floatingip(
floating_ip['id'], port_id=port['id'],
fixed_ip_address=list_ips[1])
update_floating_ip = body['floatingip']
self.assertEqual(update_floating_ip['fixed_ip_address'],
list_ips[1])
| vedujoshi/tempest | tempest/api/network/test_floating_ips.py | Python | apache-2.0 | 10,882 | 0 |
""" 9.4 Write a program to read through the mbox-short.txt and
figure out who has sent the greatest number of mail messages.
The program looks for 'From ' lines and takes the second word of those lines as the person who sent the mail.
The program creates a Python dictionary that maps the sender's mail address to a count of the number of times they appear in the file.
After the dictionary is produced, the program reads through the dictionary using a maximum loop to find the most prolific committer.
Desired output = cwen@iupui.edu 5
"""
filename = raw_input("enter file name:")
handle = None
try:
handle = open(filename)
except:
print 'File cannot be opened or read.', filename
exit()
counts = {}
for line in handle:
    line = line.strip()
    # Per the assignment, use the 'From ' lines (not 'From:') and take the
    # second word as the sender's address.
    if line.startswith('From '):
        words = line.split()
        sender = words[1].lower()
        counts[sender] = counts.get(sender, 0) + 1
handle.close()
# always close the file as soon as possible. Freeing resources asap is a best practice.
email = None
email_count = 0
for word,count in counts.items():
if email is None or count > email_count:
email = word
email_count = count
print email, email_count
| missulmer/Pythonstudy | coursera_python_specialization/9_4.py | Python | cc0-1.0 | 1,246 | 0.005618 |
# use priority queue to implement stack and queue
import heapq
class stack:
    def __init__(self):
        # Use instance attributes: class-level lists would be shared by every
        # stack instance.
        self.data = []
        self.highestPriority = 0
        self.lowestPriority = 0
    def push(self, e):
        self.highestPriority -= 1 # smaller value means priority is higher
        heapq.heappush(self.data, (self.highestPriority, e))
    def pop(self):
        if not self.isEmpty():  # was `s.isEmpty()`, which referenced a global by accident
            self.highestPriority += 1
            return heapq.heappop(self.data)[1]
        else:
            return None
    def isEmpty(self):
        return self.highestPriority >= self.lowestPriority
class queue:
    def __init__(self):
        # Use instance attributes for the same reason as in `stack` above.
        self.data = []
        self.highestPriority = 0
        self.lowestPriority = 0
    def enqueue(self, e):
        self.lowestPriority += 1 # increase the lowest priority (lowering it)
        heapq.heappush(self.data, (self.lowestPriority, e))
    def dequeue(self):
        if self.isEmpty():
            return None
        else:
            # increase the highest priority (lowering it)
            self.highestPriority += 1
            return heapq.heappop(self.data)[1]
    def isEmpty(self):
        if self.highestPriority >= self.lowestPriority:
            self.highestPriority = 0
            self.lowestPriority = 0
            return True
        else:
            return False
def heapsort(iterable):
h = []
for i in iterable:
heapq.heappush(h, i)
return [heapq.heappop(h) for x in range(len(iterable))]
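# Example (added): heapsort([3, 1, 2]) returns [1, 2, 3]; heapq keeps the
# smallest element at the front of the heap, so repeated heappop() calls yield
# the items in ascending order.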
if __name__ == '__main__':
import random
data = [random.randint(1, 100) for x in range(15)]
data.sort()
'''
s = stack()
for i in data:
s.push(i)
while not s.isEmpty():
print(s.pop())
'''
q = queue()
for i in data:
q.enqueue(i)
while not q.isEmpty():
print(q.dequeue())
| yubinbai/python_practice | priorityqueue.py | Python | apache-2.0 | 1,718 | 0.005239 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-05 14:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partner', '0016_auto_20191115_2151'),
]
operations = [
migrations.AlterField(
model_name='historicalpartner',
name='name',
field=models.CharField(blank=True, db_index=True, max_length=128, verbose_name='Name'),
),
migrations.AlterField(
model_name='partner',
name='name',
field=models.CharField(blank=True, db_index=True, max_length=128, verbose_name='Name'),
),
migrations.AlterField(
model_name='stockalert',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date Created'),
),
]
| eduNEXT/edunext-ecommerce | ecommerce/extensions/partner/migrations/0017_auto_20200305_1448.py | Python | agpl-3.0 | 887 | 0.003382 |
import MySQLdb
from parameters import Parameters
import logging
def run_sql(sql, db=None):
    # Reuse a caller-provided connection when one is given; otherwise open a
    # new one and close it before returning.
    own_connection = db is None
    if own_connection:
        db = MySQLdb.connect(host=Parameters.DB_HOST, user=Parameters.DB_USER, passwd=Parameters.DB_PASSWORD, db=Parameters.DB_SCHEMA)
    cursor = db.cursor()
    logging.debug(sql)
    data = None
    try:
        cursor.execute(sql)
        db.commit()
        data = cursor.fetchall()
    except Exception as e:
        logging.error(e)
        db.rollback()
    finally:
        if own_connection:
            db.close()
    try:
        return data[0][0]
    except (TypeError, IndexError):
        return True
def run_sql_multi(sql_list):
for sql in sql_list:
run_sql(sql)
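# Hedged usage sketch (added): connection parameters come from Parameters, so
# callers typically do something like
#   total = run_sql("SELECT COUNT(*) FROM listing")   # returns the scalar data[0][0]
#   run_sql("DELETE FROM listing WHERE id = 42")      # no result rows -> returns True
# The table name 'listing' is hypothetical.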
def initialise_db():
run_sql_multi(Parameters.SQL_INITIALISE)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
initialise_db()
| willycs40/zoopla_pull | db.py | Python | bsd-2-clause | 795 | 0.015094 |
import logging
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils import timezone
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.signals import user_registered
from reviewboard.reviews.models import ReviewRequest, Review
from reviewboard.reviews.signals import review_request_published, \
review_published, reply_published
from reviewboard.reviews.views import build_diff_comment_fragments
def review_request_published_cb(sender, user, review_request, changedesc,
**kwargs):
"""
Listens to the ``review_request_published`` signal and sends an
email if this type of notification is enabled (through
``mail_send_review_mail`` site configuration).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("mail_send_review_mail"):
mail_review_request(user, review_request, changedesc)
def review_published_cb(sender, user, review, **kwargs):
"""
Listens to the ``review_published`` signal and sends an email if
this type of notification is enabled (through
``mail_send_review_mail`` site configuration).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("mail_send_review_mail"):
mail_review(user, review)
def reply_published_cb(sender, user, reply, **kwargs):
"""
Listens to the ``reply_published`` signal and sends an email if
this type of notification is enabled (through
``mail_send_review_mail`` site configuration).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("mail_send_review_mail"):
mail_reply(user, reply)
def user_registered_cb(user, **kwargs):
"""
Listens for new user registrations and sends a new user registration
e-mail to administrators, if enabled.
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("mail_send_new_user_mail"):
mail_new_user(user)
def connect_signals():
review_request_published.connect(review_request_published_cb,
sender=ReviewRequest)
review_published.connect(review_published_cb, sender=Review)
reply_published.connect(reply_published_cb, sender=Review)
user_registered.connect(user_registered_cb)
def build_email_address(fullname, email):
if not fullname:
return email
else:
return u'"%s" <%s>' % (fullname, email)
def get_email_address_for_user(u):
return build_email_address(u.get_full_name(), u.email)
def get_email_addresses_for_group(g):
if g.mailing_list:
if g.mailing_list.find(",") == -1:
# The mailing list field has only one e-mail address in it,
# so we can just use that and the group's display name.
return [u'"%s" <%s>' % (g.display_name, g.mailing_list)]
else:
# The mailing list field has multiple e-mail addresses in it.
# We don't know which one should have the group's display name
# attached to it, so just return their custom list as-is.
return g.mailing_list.split(',')
else:
return [get_email_address_for_user(u)
for u in g.users.filter(is_active=True)]
class SpiffyEmailMessage(EmailMultiAlternatives):
"""An EmailMessage subclass with improved header and message ID support.
This also knows about several headers (standard and variations),
including Sender/X-Sender, In-Reply-To/References, and Reply-To.
The generated Message-ID header from the e-mail can be accessed
through the :py:attr:`message_id` attribute after the e-mail is sent.
"""
def __init__(self, subject, text_body, html_body, from_email, sender,
to, cc, in_reply_to, headers={}):
headers = headers.copy()
if sender:
headers['Sender'] = sender
headers['X-Sender'] = sender
if in_reply_to:
headers['In-Reply-To'] = in_reply_to
headers['References'] = in_reply_to
headers['Reply-To'] = from_email
# Mark the mail as 'auto-generated' (according to RFC 3834) to
# hopefully avoid auto replies.
headers['Auto-Submitted'] = 'auto-generated'
headers['From'] = from_email
super(SpiffyEmailMessage, self).__init__(subject, text_body,
settings.DEFAULT_FROM_EMAIL,
to, headers=headers)
self.cc = cc or []
self.message_id = None
self.attach_alternative(html_body, "text/html")
def message(self):
msg = super(SpiffyEmailMessage, self).message()
self.message_id = msg['Message-ID']
return msg
def recipients(self):
"""Returns a list of all recipients of the e-mail. """
return self.to + self.bcc + self.cc
def send_review_mail(user, review_request, subject, in_reply_to,
extra_recipients, text_template_name,
html_template_name, context={}):
"""
Formats and sends an e-mail out with the current domain and review request
being added to the template context. Returns the resulting message ID.
"""
current_site = Site.objects.get_current()
from_email = get_email_address_for_user(user)
recipients = set()
to_field = set()
if from_email:
recipients.add(from_email)
if review_request.submitter.is_active:
recipients.add(get_email_address_for_user(review_request.submitter))
for u in review_request.target_people.filter(is_active=True):
recipients.add(get_email_address_for_user(u))
to_field.add(get_email_address_for_user(u))
for group in review_request.target_groups.all():
for address in get_email_addresses_for_group(group):
recipients.add(address)
for profile in review_request.starred_by.all():
if profile.user.is_active:
recipients.add(get_email_address_for_user(profile.user))
if extra_recipients:
for recipient in extra_recipients:
if recipient.is_active:
recipients.add(get_email_address_for_user(recipient))
siteconfig = current_site.config.get()
domain_method = siteconfig.get("site_domain_method")
context['user'] = user
context['domain'] = current_site.domain
context['domain_method'] = domain_method
context['review_request'] = review_request
if review_request.local_site:
context['local_site_name'] = review_request.local_site.name
text_body = render_to_string(text_template_name, context)
html_body = render_to_string(html_template_name, context)
    # Set the cc field only when the to field (i.e. People) is non-empty,
    # so that the to field consists of reviewers and cc consists of all the
    # other members of the group.
if to_field:
cc_field = recipients.symmetric_difference(to_field)
else:
to_field = recipients
cc_field = set()
base_url = '%s://%s' % (domain_method, current_site.domain)
headers = {
'X-ReviewBoard-URL': base_url,
'X-ReviewRequest-URL': base_url + review_request.get_absolute_url(),
'X-ReviewGroup': ', '.join(group.name for group in \
review_request.target_groups.all())
}
sender = None
if settings.DEFAULT_FROM_EMAIL:
sender = build_email_address(user.get_full_name(),
settings.DEFAULT_FROM_EMAIL)
if sender == from_email:
# RFC 2822 states that we should only include Sender if the
# two are not equal.
sender = None
message = SpiffyEmailMessage(subject.strip(), text_body, html_body,
from_email, sender, list(to_field),
list(cc_field), in_reply_to, headers)
try:
message.send()
except Exception, e:
logging.error("Error sending e-mail notification with subject '%s' on "
"behalf of '%s' to '%s': %s",
subject.strip(),
from_email,
','.join(list(to_field) + list(cc_field)),
e,
exc_info=1)
return message.message_id
def mail_review_request(user, review_request, changedesc=None):
"""
Send an e-mail representing the supplied review request.
The "changedesc" argument is an optional ChangeDescription showing
what changed in a review request, possibly with explanatory text from
the submitter. This is created when saving a draft on a public review
request, and will be None when publishing initially. This is used by
the template to add contextual (updated) flags to inform people what
changed.
"""
# If the review request is not yet public or has been discarded, don't send
# any mail.
if not review_request.public or review_request.status == 'D':
return
subject = u"Review Request %d: %s" % (review_request.id, review_request.summary)
reply_message_id = None
if review_request.email_message_id:
# Fancy quoted "replies"
subject = "Re: " + subject
reply_message_id = review_request.email_message_id
extra_recipients = review_request.participants
else:
extra_recipients = None
extra_context = {}
if changedesc:
extra_context['change_text'] = changedesc.text
extra_context['changes'] = changedesc.fields_changed
review_request.time_emailed = timezone.now()
review_request.email_message_id = \
send_review_mail(user, review_request, subject, reply_message_id,
extra_recipients,
'notifications/review_request_email.txt',
'notifications/review_request_email.html',
extra_context)
review_request.save()
def mail_review(user, review):
"""Sends an e-mail representing the supplied review."""
review_request = review.review_request
if not review_request.public:
return
review.ordered_comments = \
review.comments.order_by('filediff', 'first_line')
extra_context = {
'user': user,
'review': review,
}
has_error, extra_context['comment_entries'] = \
build_diff_comment_fragments(
review.ordered_comments, extra_context,
"notifications/email_diff_comment_fragment.html")
review.email_message_id = \
send_review_mail(user,
review_request,
u"Re: Review Request %d: %s" % (review_request.id, review_request.summary),
review_request.email_message_id,
None,
'notifications/review_email.txt',
'notifications/review_email.html',
extra_context)
review.time_emailed = timezone.now()
review.save()
def mail_reply(user, reply):
"""
Sends an e-mail representing the supplied reply to a review.
"""
review = reply.base_reply_to
review_request = review.review_request
if not review_request.public:
return
extra_context = {
'user': user,
'review': review,
'reply': reply,
}
has_error, extra_context['comment_entries'] = \
build_diff_comment_fragments(
reply.comments.order_by('filediff', 'first_line'),
extra_context,
"notifications/email_diff_comment_fragment.html")
reply.email_message_id = \
send_review_mail(user,
review_request,
u"Re: Review Request %d: %s" % (review_request.id, review_request.summary),
review.email_message_id,
review.participants,
'notifications/reply_email.txt',
'notifications/reply_email.html',
extra_context)
reply.time_emailed = timezone.now()
reply.save()
def mail_new_user(user):
"""Sends an e-mail to administrators for newly registered users."""
current_site = Site.objects.get_current()
siteconfig = current_site.config.get_current()
domain_method = siteconfig.get("site_domain_method")
subject = "New Review Board user registration for %s" % user.username
from_email = get_email_address_for_user(user)
context = {
'domain': current_site.domain,
'domain_method': domain_method,
'user': user,
'user_url': reverse('admin:auth_user_change', args=(user.id,))
}
text_message = render_to_string('notifications/new_user_email.txt', context)
html_message = render_to_string('notifications/new_user_email.html',
context)
message = SpiffyEmailMessage(subject.strip(), text_message, html_message,
settings.SERVER_EMAIL, settings.SERVER_EMAIL,
[build_email_address(*a)
for a in settings.ADMINS], None, None)
try:
message.send()
except Exception, e:
logging.error("Error sending e-mail notification with subject '%s' on "
"behalf of '%s' to admin: %s",
subject.strip(), from_email, e, exc_info=1)
| atagar/ReviewBoard | reviewboard/notifications/email.py | Python | mit | 13,727 | 0.000437 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado #
# (pacoqueen@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, Image, XPreformatted, Preformatted, PageBreak, KeepTogether, CondPageBreak
from reportlab.platypus.flowables import Flowable
from reportlab.rl_config import defaultPageSize
from reportlab.lib import colors, enums
from reportlab.lib.units import cm
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
import sys, os#, Image
from factura_multipag import Linea, TablaFija
try:
import pclases, utils
except ImportError:
try:
import sys, os
sys.path.insert(0, os.path.join("..", "framework"))
import pclases, utils
except ImportError:
sys.path.insert(0, ".")
import pclases, utils
try:
from geninformes import give_me_the_name_baby, escribe, rectangulo, el_encogedor_de_fuentes_de_doraemon, agregarFila
except ImportError:
import sys
sys.path.append(os.path.join("..", "informes"))
from geninformes import give_me_the_name_baby, escribe, rectangulo, el_encogedor_de_fuentes_de_doraemon, agregarFila
from tempfile import gettempdir
PAGE_HEIGHT = defaultPageSize[1]; PAGE_WIDTH = defaultPageSize[0]
estilos = getSampleStyleSheet()
class lastPageNumberFlowable(Flowable):
def __init__(self, xoffset = 0, yoffset = 0):
Flowable.__init__(self)
self._xoffset = xoffset
self._yoffset = yoffset
def draw(self):
canvas = self.canv
if not canvas.hasForm("lastPageNumber"):
canvas.beginForm("lastPageNumber")
canvas.setFont("Times-Italic", 9)
canvas.drawString(self._xoffset,
self._yoffset,
str(canvas.getPageNumber()))
canvas.endForm()
class LineaHorizontal(Flowable):
def __init__(self, ancho = None, grosor = 1):
self.line_thickness = grosor
if ancho:
self._width = ancho
else:
self._width = None
def wrap(self, availWidth, availHeight):
if self._width is None:
self._width = availWidth
self._height = self.line_thickness
return self._width, self._height
def draw(self):
self.canv.setLineWidth(self.line_thickness)
orig = (PAGE_WIDTH / 2) - (self._width / 2)
orig -= 2.75 * cm # Margen al llamar a mi draw desde el build.
self.canv.line(orig,
.5 * self.line_thickness,
self._width + orig,
.5 * self.line_thickness)
def sanitize(d):
"""
    Replaces everything that is not text:
    - Floats become their representation with thousand dots and a decimal comma (2 decimals).
    - Integers become their text equivalent.
"""
def tostr(v):
if isinstance(v, float):
v = utils.float2str(v)
elif isinstance(v, int):
v = utils.int2str(v)
elif isinstance(v, (list, tuple)):
            # Recursion, divine treasure...
v = sanitize(v)
return v
if isinstance(d, dict):
for k in d.keys():
d[k] = tostr(d[k])
elif isinstance(d, (list, tuple)):
res = []
for i in d:
res.append(tostr(i))
d = type(d)(res)
return d
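# Example (illustrative; the exact strings depend on utils.float2str/int2str,
# which are assumed to use the Spanish "1.234,56" number format):
#
#   >>> sanitize({"cantidad": 3, "precio": 1234.5})
#   {'cantidad': '3', 'precio': '1.234,50'}
#
# Nested lists and tuples are converted recursively, preserving their type.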
def cabecera_y_cliente(canvas,
doc,
datos_cliente,
datos_de_la_empresa,
datos_albaran):
"""
    Writes the text "ALBARÁN" and the customer data.
    The customer data comes in a dictionary with the keys:
    código (customer code), cif, razón social, dirección, población, provincia.
"""
fuente = "Helvetica"
tamanno = 24
canvas.saveState()
canvas.setFont(fuente, tamanno)
canvas.drawString(PAGE_WIDTH
- canvas.stringWidth(escribe("ALBARÁN"),fuente,tamanno)
- 1.0*cm,
PAGE_HEIGHT - 1.5*cm,
escribe("ALBARÁN"))
canvas.restoreState()
tamanno = 12
altura_linea = 16
xCliente = (PAGE_WIDTH - 1*cm) / 2.5
linea = (PAGE_HEIGHT-1.5*cm) - 0.10*cm - 2*cm
rectangulo(canvas,
(xCliente - 0.2*cm, PAGE_HEIGHT - 1.5*cm + altura_linea - 2*cm),
(PAGE_WIDTH - 1*cm,
(PAGE_HEIGHT- 1.5*cm + altura_linea)
-(altura_linea*5 + 0.5*cm) - 2*cm)
)
canvas.drawString(xCliente,
linea,
escribe(
"Cód. cliente: %s C.I.F.: %s" % (
datos_cliente['código'],
datos_cliente['cif'])))
linea -= altura_linea
el_encogedor_de_fuentes_de_doraemon(canvas,
fuente,
tamanno,
xCliente,
PAGE_WIDTH - 1*cm,
linea,
escribe(datos_cliente['razón social']))
linea -= altura_linea
el_encogedor_de_fuentes_de_doraemon(canvas,
fuente,
tamanno,
xCliente,
PAGE_WIDTH - 1*cm,
linea,
escribe(datos_cliente['dirección']))
linea -= altura_linea
canvas.drawString(xCliente,
linea,
escribe(datos_cliente['población']))
linea -= altura_linea
canvas.drawString(xCliente,
linea,
escribe(datos_cliente['provincia']))
    # Company data
dibujar_datos_empresa(canvas, datos_de_la_empresa)
    # Invoice header
build_tabla_cabecera(canvas, datos_albaran, 22.5*cm)
def dibujar_datos_empresa(canvas, datos_de_la_empresa):
"""
    Draws the company data at the top of the page.
"""
logo, empresa = build_logo_y_empresa_por_separado(datos_de_la_empresa)
logo.drawOn(canvas, 1*cm, PAGE_HEIGHT - 2.8 * cm)
fuente = "Helvetica"
tamanno = 16
for i in range(len(empresa)):
if i == 1:
            tamanno -= 4 # First line (company name) stays a bit larger.
linea = PAGE_HEIGHT - 1.5 * cm
el_encogedor_de_fuentes_de_doraemon(canvas,
fuente,
tamanno,
3.25*cm,
PAGE_WIDTH - 5*cm,
linea - (i*0.55*cm),
escribe(empresa[i]))
def build_tabla_cabecera(canvas, datos_albaran, y1):
    # Header.
canvas.saveState()
x1 = 1.0 * cm + 1
limite = x1 + 0.9 * PAGE_WIDTH
incremento = (limite - x1) / 3
#y1 = 22.5 * cm
y2 = y1 - 18
for texto, clave in (("Código cliente", "codcliente"),
("Nº Albarán", "número"),
("Fecha", "fecha")):
x2 = x1 + incremento
canvas.setFont("Times-Roman", 12)
dato_albaran = escribe(datos_albaran[clave])
rectangulo(canvas,
(x1, y1),
(x2, y2),
texto = dato_albaran,
alinTxtX = "centro",
alinTxtY = "centro")
canvas.setFont("Times-Roman", 10)
canvas.drawString(x1+0.2*cm, y1 + 3, texto)
x1 += incremento
canvas.restoreState()
    # Page x of y.
canvas.saveState()
canvas.setFont("Times-Italic", 9)
canvas.drawRightString(0.9 * PAGE_WIDTH - 0.5 * cm,
1.0 * cm,
escribe("Página %d de " % canvas.getPageNumber()))
canvas.doForm("lastPageNumber")
canvas.restoreState()
def solo_cabecera(canvas,
doc,
datos_de_la_empresa,
datos_albaran):
"""
    Writes the text "ALBARÁN" and the page header (company data and header
    table); unlike cabecera_y_cliente, it does not draw the customer data
    block.
"""
fuente = "Helvetica"
tamanno = 24
canvas.saveState()
canvas.setFont(fuente, tamanno)
canvas.drawString(PAGE_WIDTH
- canvas.stringWidth(escribe("ALBARÁN"),fuente,tamanno)
- 1.0*cm,
PAGE_HEIGHT - 1.5*cm,
escribe("ALBARÁN"))
canvas.restoreState()
    # Company data
dibujar_datos_empresa(canvas, datos_de_la_empresa)
#logo, empresa = build_logo_y_empresa_por_separado(datos_de_la_empresa)
##LineaHorizontal(0.9 * PAGE_WIDTH).drawOn(canvas, 78.0, 12.5*cm)
##LineaHorizontal(0.9 * PAGE_WIDTH).drawOn(canvas, 78.0, 12.4 *cm)
#logo.drawOn(canvas, 1*cm, PAGE_HEIGHT - 2.8 * cm)
#fuente = "Helvetica"
#tamanno = 10
#for i in range(len(empresa)):
# linea = PAGE_HEIGHT - 1.5 * cm
# el_encogedor_de_fuentes_de_doraemon(canvas,
# fuente,
# tamanno,
# 3.25*cm,
# PAGE_WIDTH - 1*cm,
# linea - (i*0.5*cm),
# escribe(empresa[i]))
    # Header.
build_tabla_cabecera(canvas, datos_albaran, 26.0*cm)
def build_tabla_contenido(data):
"""
    Builds the content table of the albaranSalida (delivery note).
    The data must come in lists. Each table row is a tuple or list with the
    code, description, quantity, unit price (with discount, if any, and VAT
    included) and order number.
    Price and quantity must be floats so the subtotal can be computed.
"""
estilo_cabecera_tabla = ParagraphStyle("Cabecera tabla",
parent=estilos["Heading3"])
estilo_cabecera_tabla.fontName = "Times-Bold"
estilo_cabecera_tabla.alignment = enums.TA_CENTER
estilo_numeros_tabla = ParagraphStyle("Números tabla",
parent=estilos["Normal"])
estilo_numeros_tabla.alignment = enums.TA_RIGHT
datos = [(Paragraph(escribe("Código"), estilo_cabecera_tabla),
Paragraph(escribe("Descripción"), estilo_cabecera_tabla),
Paragraph("Cantidad", estilo_cabecera_tabla),
Paragraph("Precio/U", estilo_cabecera_tabla),
Paragraph("Total c/IVA", estilo_cabecera_tabla),
Paragraph(escribe("Nº Pedido"), estilo_cabecera_tabla))
]
for d in data:
fila = (escribe(d[0]),
Paragraph(escribe(d[1]),estilos["Normal"]),
Paragraph(escribe(utils.float2str(d[2])),estilo_numeros_tabla),
Paragraph(escribe(utils.float2str(d[3])),estilo_numeros_tabla),
Paragraph(escribe(utils.float2str(d[2] * d[3])),
estilo_numeros_tabla),
escribe(d[4])
)
datos.append(fila)
tabla = Table(datos,
colWidths = (PAGE_WIDTH * 0.13,
PAGE_WIDTH * 0.35,
PAGE_WIDTH * 0.09,
PAGE_WIDTH * 0.09,
PAGE_WIDTH * 0.13,
PAGE_WIDTH * 0.11),
repeatRows = 1)
tabla.setStyle(TableStyle([
("BACKGROUND", (0, 0), (-1, 0), colors.lightgrey),
("LINEBEFORE", (0, 0), (-1, 0), 0.25, colors.black),
("LINEBELOW", (0, 0), (-1, 0), 1.0, colors.black),
("LINEBELOW", (0, "splitlast"), (-1, "splitlast"), 1.0, colors.black),
#("BOX", (0, 0), (-1, -1), 1.0, colors.black),
("LINEABOVE", (0, 0), (-1, 0), 1.0, colors.black),
("LINEBEFORE", (0, 0), (0, -1), 1.0, colors.black),
("LINEAFTER", (-1, 0), (-1, -1), 1.0, colors.black),
#("INNERGRID", (0, 0), (-1, -1), 0.25, colors.black),
("VALIGN", (0, 0), (-1, 0), "CENTER"),
("VALIGN", (0, 0), (0, -1), "TOP"),
("ALIGN", (0, 0), (-1, 0), "CENTER"),
("ALIGN", (-3, 1), (-1, -1), "RIGHT"),
        #("ALIGN", (0, 1), (0, -1), "DECIMAL"), <- Can't change the
        # pivotChar from "." to ",". Not usable here.
("ALIGN", (-1, 1), (-1, -1), "CENTER"),
("ALIGN", (0, 1), (0, -1), "CENTER"),
#("RIGHTPADDING", (0, 1), (0, -1), 0.75 * cm),
]))
return tabla
#class TablaFija(Table):
# """
# Tabla pero con la esquina arriba-izquierda forzada a una posición.
# """
# def __init__(self, ox, oy, *args, **kw):
# Table.__init__(self, *args, **kw)
# self.ox = ox
# self.oy = oy
# _old_drawOn = Table.drawOn
#
# def drawOn(self, canvas, x = None, y = None, *args, **kw):
# x = self.ox
# y = self.oy
# self._old_drawOn(canvas, x, y, *args, **kw)
def build_tabla_totales(totales):
"""
    Builds a table with the totals of the albaranSalida (delivery note).
    The table has two rows, header and breakdown. The «totales» variable is
    a list with the totals *in the following order*:
    taxable base, VAT percentage as a fraction of 1, and total.
    The taxable base already includes the line-item (LDV) discounts and so on.
"""
datos = [["Base imponible", "%d%% IVA" % (totales[1]*100), "Total"],
[totales[0], totales[2] - totales[0], totales[2]]]
datos = sanitize(datos)
estilo_numeros_tabla = ParagraphStyle("Números tabla",
parent=estilos["Normal"])
estilo_numeros_tabla.alignment = enums.TA_RIGHT
estilo_numeros_tabla.fontSize += 2
datos = [[Paragraph(celda, estilos["Normal"]) for celda in datos[0]] ,
[Paragraph(celda, estilo_numeros_tabla) for celda in datos[1]]]
tabla = TablaFija(78,
2*cm,
datos,
colWidths = (PAGE_WIDTH * (0.9/3),)*3)
#tabla = Table(datos,
# colWidths = (PAGE_WIDTH * (0.9/3),)*3)
tabla.setStyle(TableStyle([
("BACKGROUND", (0, 0), (-1, 0), colors.lightgrey),
("LINEBELOW", (0, 0), (-1, 0), 1.0, colors.black),
("BOX", (0, 0), (-1, -1), 1.0, colors.black),
("INNERGRID", (0, 0), (-1, -1), 1.0, colors.black),
("ALIGN", (0, 0), (-1, 0), "LEFT"),
]))
return tabla
def build_marco_logo_y_empresa(dde):
"""
    dde is a list whose first element is the path to the company logo (or None),
    followed by a series of text lines with the company data to display.
    Returns a table with transparent frames holding the logo and the
    lines.
"""
if dde[0] != None:
logo = Image(dde[0])
logo.drawHeight = 2*cm * logo.drawHeight / logo.drawWidth
logo.drawWidth = 2*cm
else:
logo = Paragraph("", estilos["Normal"])
lineas_empresa = dde[1:]
if len(lineas_empresa) <= 3:
empresa = Preformatted("\n".join(lineas_empresa), estilos["Normal"])
else:
texto_empresa = lineas_empresa[0] + "\n"
#+ ". ".join(lineas_empresa[1:])
resto_lineas = lineas_empresa[1:]
pivot = len(resto_lineas)/2
r1, r2 = resto_lineas[:pivot], resto_lineas[pivot:]
texto_empresa += ". ".join(r1) + "\n" + ". ".join(r2)
empresa = Preformatted(texto_empresa, estilos["Normal"])
datos = [[logo, empresa]]
tabla = Table(datos,
colWidths = (PAGE_WIDTH * 0.25,
PAGE_WIDTH * 0.65))
tabla.setStyle(TableStyle([
("ALIGN", (0, 0), (1, 0), "RIGHT"),
("ALIGN", (1, 0), (-1, -1), "LEFT"),
("VALIGN", (0, 0), (-1, -1), "CENTER"),
]))
return tabla
def build_logo_y_empresa_por_separado(dde):
"""
    Urge to kill rising...
    dde is a list with the path to the company logo (or None) and a
    series of text lines with the company data to display.
    Returns an image with the logo and a list of lines with the
    company data, to be drawn (drawText) next to it.
    If there is no logo, returns None and the list of lines.
"""
if dde[0] != None:
logo = Image(dde[0])
logo.drawHeight = 2*cm * logo.drawHeight / logo.drawWidth
logo.drawWidth = 2*cm
else:
logo = None
lineas_empresa = dde[1:]
if len(lineas_empresa) <= 3:
while len(lineas_empresa) < 3:
lineas_empresa.append("")
empresa = lineas_empresa
else:
texto_empresa = lineas_empresa[0] + "\n"
#+ ". ".join(lineas_empresa[1:])
resto_lineas = lineas_empresa[1:]
pivot = len(resto_lineas)/2
r1, r2 = resto_lineas[:pivot], resto_lineas[pivot:]
texto_empresa += ". ".join(r1) + "\n" + ". ".join(r2)
        # Excuse moi, but I need to reuse what is already there. A split hurts
        # nobody, except maybe performance "a little".
lineas_empresa = texto_empresa.split("\n")
return logo, lineas_empresa
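# Illustrative example (the values below are made up): given
#
#   dde = ["../imagenes/logo.png",
#          "ACME Geotextiles, S.L.",
#          "Pol. Ind. Norte, nave 7",
#          "Telf.: 555 000 000",
#          "Fax: 555 000 001"]
#
# build_logo_y_empresa_por_separado(dde) returns a reportlab Image scaled to
# 2 cm wide plus a list of three text lines (after the company name, the three
# trailing lines are folded into two, joined with ". ").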
def go(titulo,
ruta_archivo,
datos_cliente,
datos_albaran,
lineas_contenido,
totales,
datos_de_la_empresa):
"""
    Receives the document title and the full path of the PDF file,
    the customer data as a dictionary, the delivery note data also as a
    dictionary, the lines as a list of lists, the totals as a list,
    and another list with the path to the logo as its first element and as many
    lines of company data as should be shown next to the logo.
    Returns one or two PDF file names with the delivery note/waybill
    and (optionally) the container pick-up document.
"""
doc = SimpleDocTemplate(ruta_archivo,
title = titulo,
topMargin = 4.30*cm,
bottomMargin = 2*cm)
#leftMargin = 1*cm,
#rigthMargin = 1*cm)
contenido = build_tabla_contenido(lineas_contenido)
totales = build_tabla_totales(totales)
#texto = build_texto()
    story = [#Spacer(1, 1.60 * cm), # Roughly the height of the customer data block.
#encabezado,
Spacer(1, 3.50 * cm),
contenido,
Linea((1.05*cm, 24.5*cm - 3*cm),
(1.05*cm, 2.5*cm + 0.25*cm)),
Linea((PAGE_WIDTH - 1.05*cm, 24.5*cm - 3*cm),
(PAGE_WIDTH - 1.05*cm, 2.5*cm + 0.25*cm)),
Spacer(1, 0.15 * cm),
totales,
lastPageNumberFlowable(0.9*PAGE_WIDTH - 0.5*cm + 1, 1.0*cm)]
#Spacer(1, 0.15 * cm),
             # Double line.
#KeepTogether([LineaHorizontal(0.9 * PAGE_WIDTH),
# Spacer(1, 0.05 * cm),
# LineaHorizontal(0.9 * PAGE_WIDTH)]),
#Spacer(1, 0.15 * cm),
#CondPageBreak(13*cm),
#logo_y_empresa,
#Spacer(1, 0.25 * cm),
#texto]
story = utils.aplanar([i for i in story if i])
_cabecera_y_cliente = lambda c, d: cabecera_y_cliente(c,
d,
datos_cliente,
datos_de_la_empresa,
datos_albaran)
_solo_cabecera = lambda c, d: solo_cabecera(c,
d,
datos_de_la_empresa,
datos_albaran)
doc.build(story,
onFirstPage = _cabecera_y_cliente,
onLaterPages = _solo_cabecera)
return ruta_archivo
def go_from_albaranSalida(albaranSalida):
"""
    Builds the PDF from an albaranSalida object rather than from its
    individual fields.
"""
cliente = albaranSalida.cliente
datos_cliente = {"código": cliente.id,
"cif": cliente.cif,
"razón social": cliente.nombre,
"dirección": cliente.direccion,
"población": cliente.ciudad,
"provincia": cliente.provincia}
if cliente.cp and cliente.cp.strip():
datos_cliente["población"] = (cliente.cp + " - "
+ datos_cliente["población"])
datos_albaran = {"fecha": utils.str_fecha(albaranSalida.fecha),
"número": albaranSalida.numalbaran,
"codcliente": albaranSalida.cliente
and `albaranSalida.cliente.id`
or ""}
iva = cliente.iva
lineas_contenido = [(ldv.producto.codigo,
ldv.producto.descripcion,
ldv.cantidad,
ldv.precio * (1.0 - ldv.descuento) * (1 + iva),
ldv.pedidoVenta and ldv.pedidoVenta.numpedido or "")
for ldv in albaranSalida.lineasDeVenta]
totales = [albaranSalida.calcular_total(iva_incluido = False),
iva,
albaranSalida.calcular_total(iva_incluido = True)]
try:
dde = pclases.DatosDeLaEmpresa.select()[0]
datos_de_la_empresa = [os.path.join("..", "imagenes", dde.logo),
dde.nombre +
(dde.cif and " (" + dde.str_cif_o_nif() +": " + dde.cif + ")" or ""),
dde.direccion,
"%s %s (%s), %s" % (dde.cp,
dde.ciudad,
dde.provincia,
dde.pais),
]
if dde.fax:
if dde.fax.strip() == dde.telefono.strip():
datos_de_la_empresa.append("Telf. y fax: %s" % dde.telefono)
else:
datos_de_la_empresa.append("Telf.: %s" % (dde.telefono))
datos_de_la_empresa.append("Fax: %s" % (dde.fax))
if dde.email:
datos_de_la_empresa.append(dde.email)
except IndexError:
        datos_de_la_empresa = [None]
nomarchivo = os.path.join(gettempdir(),
"albaranSalida_%s.pdf" % give_me_the_name_baby())
return go("Albaran de salida %s (%s)" % (
albaranSalida.cliente.nombre,
utils.str_fecha(albaranSalida.fecha)),
nomarchivo,
datos_cliente,
datos_albaran,
lineas_contenido,
totales,
datos_de_la_empresa)
if __name__ == "__main__":
try:
print go_from_albaranSalida(pclases.AlbaranSalida.select()[-2])
except Exception, msg:
sys.stderr.write(`msg`)
datos_cliente = {"código": 123,
"cif": "12345678-Z",
"razón social": "Fulanito de tal y pascual, S.L.U.",
"dirección": "Rue del percebe, 13. 4º B",
"población": "Chiquitistán",
"provincia": "Huelva"}
datos_albaran = {"fecha": "10/06/2008",
"número": "A12314",
"codcliente": "12"}
lineas_contenido = [
("COD1", "Una cosa "*20, 1.234, 1.245, "PEDIDO 1"),
("", "Grñai mama", 1, 1, ""),
("1234567890123", "Otra cosa", 10, 0.33, "123154a")] * 7
totales = [100.0, 0.16, 116.0]
datos_de_la_empresa = ["../imagenes/dorsia.png",
"Línea empresa 1",
"Línea empresa 2",
"Línea empresa 3"]
print go("AlbaranSalida",
"/tmp/albaranSalida.pdf",
datos_cliente,
datos_albaran,
lineas_contenido,
totales,
datos_de_la_empresa)
| pacoqueen/bbinn | informes/albaran_multipag.py | Python | gpl-2.0 | 26,260 | 0.01394 |
#!/usr/bin/python
"""A script to get information from MrTijn's new server so I (MrMadsenMalmo) can display it
on a website
"""
__author__ = "MrMadsenMalmo - Fredrik A. Madsen-Malmo & Tijndagamer"
import os
import time
import re
import datetime
def main():
dataList = []
dataList.append(os.popen("uptime").read() + "\n")
dataList.append(os.popen("cpuload").read())
dataList.append("CPU temp: \n" + os.popen("getcputemp").read())
dataList.append("Network stats:\n" + os.popen("getdown").read())
dataList.append(os.popen("getup").read() + "\n")
dataList.append("Memory stats:\n" + os.popen("free -h").read() + "\n")
dataList.append("Drive stats: TOTAL | USED | FREE\n" + os.popen("df -h | grep '/dev/' && df -h --total | grep 'total'").read())
data = str(dataList)
data = data.replace('[', '')
data = data.replace(']', '')
data = data.replace(',', '')
# os.popen("echo " + data + " > /var/www/html/status")
# for data in dataList:
# print data
with open("/var/www/html/status.txt", "w+") as file:
for data in dataList:
file.write(data)
write()
def get_time():
return re.search("\d\d\:\d\d\:\d\d", os.popen("uptime").read(), re.VERBOSE).group(0) + "\n"
def get_load():
return re.search("CPU\sload\:\s([\d\.]+\%)", os.popen("cpuload").read(), re.VERBOSE).group(1) + "\n"
def get_temp():
return re.search("[\w]+\:\s([\d\.]+)", os.popen("getcputemp").read(), re.VERBOSE).group(1) + "C\n"
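# Illustrative examples (assuming the external uptime/cpuload/getcputemp
# commands print output in the format these regexes expect):
#
#   get_time() -> "13:37:42\n"   (clock time taken from `uptime`)
#   get_load() -> "12.3%\n"      (from a line like "CPU load: 12.3%")
#   get_temp() -> "48.2C\n"      (from a line like "temp: 48.2")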
def write(time=""):
for type in [["temp", get_temp], ["load", get_load]]:
with open(type[0] + time + ".txt", "a+") as file:
if time == 'day':
                file.write(str(datetime.datetime.today()).split(' ')[0] + type[1]())
else:
file.write(get_time() + type[1]())
prev_time = get_time()
prev_day = datetime.datetime.today().day
while True:
# minute change
if get_time()[3:5] != prev_time[3:5]:
write("min")
# hour change
if get_time()[0:2] != prev_time[0:2]:
write("hr")
# day change
if datetime.datetime.today().day != prev_day:
write("day")
main()
prev_time = get_time()
prev_day = datetime.datetime.today().day
time.sleep(5)
| FredrikAugust/server-status | statuspagescript.py | Python | mit | 2,247 | 0.015576 |
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
def validKeyWordAfterCode(content, index):
keyWords = ["for", "do", "count", "each", "forEach", "else", "and", "not", "isEqualTo", "in", "call", "spawn", "execVM", "catch"];
for word in keyWords:
try:
subWord = content.index(word, index, index+len(word))
return True;
except:
pass
return False
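# Illustrative examples of the helper above:
#
#   validKeyWordAfterCode("} forEach _units;", 2)  -> True  ("forEach" starts at index 2)
#   validKeyWordAfterCode("} _x = 1;", 2)          -> False (no keyword at index 2)
#
# i.e. a closing brace does not need a trailing semicolon when it is directly
# followed by one of the listed SQF keywords.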
def check_sqf_syntax(filepath):
bad_count_file = 0
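    # NOTE: pushClosing/popClosing below appear to be unused leftovers of a
    # pyparsing-based approach (they reference Literal/closingFor/closingStack,
    # which are not defined in this file) and are never called by the checker.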
def pushClosing(t):
closingStack.append(closing.expr)
closing << Literal( closingFor[t[0]] )
def popClosing():
closing << closingStack.pop()
with open(filepath, 'r', encoding='utf-8', errors='ignore') as file:
content = file.read()
# Store all brackets we find in this file, so we can validate everything on the end
brackets_list = []
# To check if we are in a comment block
isInCommentBlock = False
checkIfInComment = False
# Used in case we are in a line comment (//)
ignoreTillEndOfLine = False
# Used in case we are in a comment block (/* */). This is true if we detect a * inside a comment block.
# If the next character is a /, it means we end our comment block.
checkIfNextIsClosingBlock = False
# We ignore everything inside a string
isInString = False
# Used to store the starting type of a string, so we can match that to the end of a string
inStringType = '';
lastIsCurlyBrace = False
checkForSemiColumn = False
# Extra information so we know what line we find errors at
lineNumber = 0
indexOfCharacter = 0
# Parse all characters in the content of this file to search for potential errors
for c in content:
if (lastIsCurlyBrace):
lastIsCurlyBrace = False
checkForSemiColumn = True
if c == '\n': # Keeping track of our line numbers
lineNumber += 1 # so we can print accurate line number information when we detect a possible error
if (isInString): # while we are in a string, we can ignore everything else, except the end of the string
if (c == inStringType):
isInString = False
# if we are not in a comment block, we will check if we are at the start of one or count the () {} and []
elif (isInCommentBlock == False):
# This means we have encountered a /, so we are now checking if this is an inline comment or a comment block
if (checkIfInComment):
checkIfInComment = False
if c == '*': # if the next character after / is a *, we are at the start of a comment block
isInCommentBlock = True
                elif (c == '/'): # Otherwise, we will check if we are in a line comment
                    ignoreTillEndOfLine = True # a line comment is a / followed by another / (//); we won't care about anything that comes after it
if (isInCommentBlock == False):
if (ignoreTillEndOfLine): # we are in a line comment, just continue going through the characters until we find an end of line
if (c == '\n'):
ignoreTillEndOfLine = False
else: # validate brackets
if (c == '"' or c == "'"):
isInString = True
inStringType = c
elif (c == '#'):
ignoreTillEndOfLine = True
elif (c == '/'):
checkIfInComment = True
elif (c == '('):
brackets_list.append('(')
elif (c == ')'):
if (brackets_list[-1] in ['{', '[']):
print("ERROR: Possible missing round bracket ')' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
brackets_list.append(')')
elif (c == '['):
brackets_list.append('[')
elif (c == ']'):
if (brackets_list[-1] in ['{', '(']):
print("ERROR: Possible missing square bracket ']' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
brackets_list.append(']')
elif (c == '{'):
brackets_list.append('{')
elif (c == '}'):
lastIsCurlyBrace = True
if (brackets_list[-1] in ['(', '[']):
print("ERROR: Possible missing curly brace '}}' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
brackets_list.append('}')
elif (c== '\t'):
print("ERROR: Tab detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
if (checkForSemiColumn):
if (c not in [' ', '\t', '\n', '/']): # keep reading until no white space or comments
checkForSemiColumn = False
if (c not in [']', ')', '}', ';', ',', '&', '!', '|', '='] and not validKeyWordAfterCode(content, indexOfCharacter)): # , 'f', 'd', 'c', 'e', 'a', 'n', 'i']):
                                print("ERROR: Possible missing semicolon ';' detected at {0} Line number: {1}".format(filepath,lineNumber))
bad_count_file += 1
else: # Look for the end of our comment block
if (c == '*'):
checkIfNextIsClosingBlock = True;
elif (checkIfNextIsClosingBlock):
if (c == '/'):
isInCommentBlock = False
elif (c != '*'):
checkIfNextIsClosingBlock = False
indexOfCharacter += 1
if brackets_list.count('[') != brackets_list.count(']'):
print("ERROR: A possible missing square bracket [ or ] in file {0} [ = {1} ] = {2}".format(filepath,brackets_list.count('['),brackets_list.count(']')))
bad_count_file += 1
if brackets_list.count('(') != brackets_list.count(')'):
print("ERROR: A possible missing round bracket ( or ) in file {0} ( = {1} ) = {2}".format(filepath,brackets_list.count('('),brackets_list.count(')')))
bad_count_file += 1
if brackets_list.count('{') != brackets_list.count('}'):
print("ERROR: A possible missing curly brace {{ or }} in file {0} {{ = {1} }} = {2}".format(filepath,brackets_list.count('{'),brackets_list.count('}')))
bad_count_file += 1
return bad_count_file
def main():
print("Validating SQF")
sqf_list = []
bad_count = 0
parser = argparse.ArgumentParser()
parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default="")
args = parser.parse_args()
# Allow running from root directory as well as from inside the tools directory
rootDir = "../addons"
if (os.path.exists("addons")):
rootDir = "addons"
for root, dirnames, filenames in os.walk(rootDir + '/' + args.module):
for filename in fnmatch.filter(filenames, '*.sqf'):
sqf_list.append(os.path.join(root, filename))
for filename in sqf_list:
bad_count = bad_count + check_sqf_syntax(filename)
print("------\nChecked {0} files\nErrors detected: {1}".format(len(sqf_list), bad_count))
if (bad_count == 0):
print("SQF validation PASSED")
else:
print("SQF validation FAILED")
return bad_count
if __name__ == "__main__":
sys.exit(main())
| osasto-kuikka/KGE | tools/sqf_validator.py | Python | gpl-2.0 | 8,231 | 0.008018 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Exception for errors when there's an error in the Result
"""
from qiskit.exceptions import QiskitError
class ResultError(QiskitError):
"""Exceptions raised due to errors in result output.
It may be better for the Qiskit API to raise this exception.
Args:
error (dict): This is the error record as it comes back from
the API. The format is like::
error = {'status': 403,
'message': 'Your credits are not enough.',
'code': 'MAX_CREDITS_EXCEEDED'}
"""
def __init__(self, error):
super().__init__(error['message'])
self.status = error['status']
self.code = error['code']
def __str__(self):
return '{}: {}'.format(self.code, self.message)
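# Example (illustrative):
#
#   >>> raise ResultError({'status': 403,
#   ...                    'message': 'Your credits are not enough.',
#   ...                    'code': 'MAX_CREDITS_EXCEEDED'})
#   Traceback (most recent call last):
#       ...
#   ResultError: MAX_CREDITS_EXCEEDED: Your credits are not enough.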
| QISKit/qiskit-sdk-py | qiskit/result/exceptions.py | Python | apache-2.0 | 1,290 | 0 |
# -*- coding: utf-8 -*-
"""
Code to manage fetching and storing the metadata of IdPs.
"""
#pylint: disable=no-member
from celery.task import task # pylint: disable=import-error,no-name-in-module
import datetime
import dateutil.parser
import logging
from lxml import etree
import requests
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from third_party_auth.models import SAMLConfiguration, SAMLProviderConfig, SAMLProviderData
log = logging.getLogger(__name__)
SAML_XML_NS = 'urn:oasis:names:tc:SAML:2.0:metadata' # The SAML Metadata XML namespace
class MetadataParseError(Exception):
""" An error occurred while parsing the SAML metadata from an IdP """
pass
@task(name='third_party_auth.fetch_saml_metadata')
def fetch_saml_metadata():
"""
Fetch and store/update the metadata of all IdPs
This task should be run on a daily basis.
It's OK to run this whether or not SAML is enabled.
Return value:
tuple(num_changed, num_failed, num_total)
num_changed: Number of providers that are either new or whose metadata has changed
num_failed: Number of providers that could not be updated
num_total: Total number of providers whose metadata was fetched
"""
if not SAMLConfiguration.is_enabled():
return (0, 0, 0) # Nothing to do until SAML is enabled.
num_changed, num_failed = 0, 0
# First make a list of all the metadata XML URLs:
url_map = {}
for idp_slug in SAMLProviderConfig.key_values('idp_slug', flat=True):
config = SAMLProviderConfig.current(idp_slug)
if not config.enabled:
continue
url = config.metadata_source
if url not in url_map:
url_map[url] = []
if config.entity_id not in url_map[url]:
url_map[url].append(config.entity_id)
# Now fetch the metadata:
for url, entity_ids in url_map.items():
try:
log.info("Fetching %s", url)
if not url.lower().startswith('https'):
log.warning("This SAML metadata URL is not secure! It should use HTTPS. (%s)", url)
response = requests.get(url, verify=True) # May raise HTTPError or SSLError or ConnectionError
response.raise_for_status() # May raise an HTTPError
try:
parser = etree.XMLParser(remove_comments=True)
xml = etree.fromstring(response.text, parser)
except etree.XMLSyntaxError:
raise
# TODO: Can use OneLogin_Saml2_Utils to validate signed XML if anyone is using that
for entity_id in entity_ids:
log.info(u"Processing IdP with entityID %s", entity_id)
public_key, sso_url, expires_at = _parse_metadata_xml(xml, entity_id)
changed = _update_data(entity_id, public_key, sso_url, expires_at)
if changed:
log.info(u"→ Created new record for SAMLProviderData")
num_changed += 1
else:
log.info(u"→ Updated existing SAMLProviderData. Nothing has changed.")
except Exception as err: # pylint: disable=broad-except
log.exception(err.message)
num_failed += 1
return (num_changed, num_failed, len(url_map))
def _parse_metadata_xml(xml, entity_id):
"""
Given an XML document containing SAML 2.0 metadata, parse it and return a tuple of
(public_key, sso_url, expires_at) for the specified entityID.
Raises MetadataParseError if anything is wrong.
"""
if xml.tag == etree.QName(SAML_XML_NS, 'EntityDescriptor'):
entity_desc = xml
else:
if xml.tag != etree.QName(SAML_XML_NS, 'EntitiesDescriptor'):
raise MetadataParseError("Expected root element to be <EntitiesDescriptor>, not {}".format(xml.tag))
entity_desc = xml.find(
".//{}[@entityID='{}']".format(etree.QName(SAML_XML_NS, 'EntityDescriptor'), entity_id)
)
if not entity_desc:
raise MetadataParseError("Can't find EntityDescriptor for entityID {}".format(entity_id))
expires_at = None
if "validUntil" in xml.attrib:
expires_at = dateutil.parser.parse(xml.attrib["validUntil"])
if "cacheDuration" in xml.attrib:
cache_expires = OneLogin_Saml2_Utils.parse_duration(xml.attrib["cacheDuration"])
if expires_at is None or cache_expires < expires_at:
expires_at = cache_expires
sso_desc = entity_desc.find(etree.QName(SAML_XML_NS, "IDPSSODescriptor"))
if not sso_desc:
raise MetadataParseError("IDPSSODescriptor missing")
if 'urn:oasis:names:tc:SAML:2.0:protocol' not in sso_desc.get("protocolSupportEnumeration"):
raise MetadataParseError("This IdP does not support SAML 2.0")
# Now we just need to get the public_key and sso_url
public_key = sso_desc.findtext("./{}//{}".format(
etree.QName(SAML_XML_NS, "KeyDescriptor"), "{http://www.w3.org/2000/09/xmldsig#}X509Certificate"
))
if not public_key:
raise MetadataParseError("Public Key missing. Expected an <X509Certificate>")
public_key = public_key.replace(" ", "")
binding_elements = sso_desc.iterfind("./{}".format(etree.QName(SAML_XML_NS, "SingleSignOnService")))
sso_bindings = {element.get('Binding'): element.get('Location') for element in binding_elements}
try:
# The only binding supported by python-saml and python-social-auth is HTTP-Redirect:
sso_url = sso_bindings['urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect']
except KeyError:
raise MetadataParseError("Unable to find SSO URL with HTTP-Redirect binding.")
return public_key, sso_url, expires_at
def _update_data(entity_id, public_key, sso_url, expires_at):
"""
Update/Create the SAMLProviderData for the given entity ID.
Return value:
False if nothing has changed and existing data's "fetched at" timestamp is just updated.
True if a new record was created. (Either this is a new provider or something changed.)
"""
data_obj = SAMLProviderData.current(entity_id)
fetched_at = datetime.datetime.now()
if data_obj and (data_obj.public_key == public_key and data_obj.sso_url == sso_url):
data_obj.expires_at = expires_at
data_obj.fetched_at = fetched_at
data_obj.save()
return False
else:
SAMLProviderData.objects.create(
entity_id=entity_id,
fetched_at=fetched_at,
expires_at=expires_at,
sso_url=sso_url,
public_key=public_key,
)
return True
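# Illustrative sketch (names and schedule are assumptions, not taken from the
# edx-platform settings): since the task above is registered as
# 'third_party_auth.fetch_saml_metadata', a daily refresh could be scheduled
# with celery beat, e.g.:
#
#   from celery.schedules import crontab
#
#   CELERYBEAT_SCHEDULE = {
#       'refresh-saml-metadata': {
#           'task': 'third_party_auth.fetch_saml_metadata',
#           'schedule': crontab(hour=2, minute=0),
#       },
#   }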
| mushtaqak/edx-platform | common/djangoapps/third_party_auth/tasks.py | Python | agpl-3.0 | 6,642 | 0.003917 |
## @file
# Routines for generating Pcd Database
#
# Copyright (c) 2013 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
from StringIO import StringIO
from Common.Misc import *
from Common.String import StringToArray
from struct import pack
from ValidCheckingInfoObject import VAR_CHECK_PCD_VARIABLE_TAB_CONTAINER
from ValidCheckingInfoObject import VAR_CHECK_PCD_VARIABLE_TAB
from ValidCheckingInfoObject import VAR_VALID_OBJECT_FACTORY
from Common.VariableAttributes import VariableAttributes
DATABASE_VERSION = 6
gPcdDatabaseAutoGenC = TemplateString("""
//
// External PCD database debug information
//
#if 0
${PHASE}_PCD_DATABASE_INIT g${PHASE}PcdDbInit = {
/* SkuIdTable */
{ ${BEGIN}${SKUID_VALUE}, ${END} },
${BEGIN} { ${INIT_VALUE_UINT64} }, /* ${INIT_CNAME_DECL_UINT64}_${INIT_GUID_DECL_UINT64}[${INIT_NUMSKUS_DECL_UINT64}] */
${END}
${BEGIN} ${VARDEF_VALUE_UINT64}, /* ${VARDEF_CNAME_UINT64}_${VARDEF_GUID_UINT64}_VariableDefault_${VARDEF_SKUID_UINT64} */
${END}
${BEGIN} { ${INIT_VALUE_UINT32} }, /* ${INIT_CNAME_DECL_UINT32}_${INIT_GUID_DECL_UINT32}[${INIT_NUMSKUS_DECL_UINT32}] */
${END}
${BEGIN} ${VARDEF_VALUE_UINT32}, /* ${VARDEF_CNAME_UINT32}_${VARDEF_GUID_UINT32}_VariableDefault_${VARDEF_SKUID_UINT32} */
${END}
/* VPD */
${BEGIN} { ${VPD_HEAD_VALUE} }, /* ${VPD_HEAD_CNAME_DECL}_${VPD_HEAD_GUID_DECL}[${VPD_HEAD_NUMSKUS_DECL}] */
${END}
/* ExMapTable */
{
${BEGIN} { ${EXMAPPING_TABLE_EXTOKEN}, ${EXMAPPING_TABLE_LOCAL_TOKEN}, ${EXMAPPING_TABLE_GUID_INDEX} },
${END}
},
/* LocalTokenNumberTable */
{
${BEGIN} offsetof(${PHASE}_PCD_DATABASE, ${TOKEN_INIT}.${TOKEN_CNAME}_${TOKEN_GUID}${VARDEF_HEADER}) | ${TOKEN_TYPE},
${END}
},
/* GuidTable */
{
${BEGIN} ${GUID_STRUCTURE},
${END}
},
${BEGIN} { ${STRING_HEAD_VALUE} }, /* ${STRING_HEAD_CNAME_DECL}_${STRING_HEAD_GUID_DECL}[${STRING_HEAD_NUMSKUS_DECL}] */
${END}
${BEGIN} /* ${VARIABLE_HEAD_CNAME_DECL}_${VARIABLE_HEAD_GUID_DECL}_Variable_Header[${VARIABLE_HEAD_NUMSKUS_DECL}] */
{
${VARIABLE_HEAD_VALUE}
},
${END}
/* SkuHead */
{
${BEGIN} offsetof (${PHASE}_PCD_DATABASE, ${TOKEN_INIT}.${TOKEN_CNAME}_${TOKEN_GUID}${VARDEF_HEADER}) | ${TOKEN_TYPE}, /* */
offsetof (${PHASE}_PCD_DATABASE, ${TOKEN_INIT}.SkuHead) /* */
${END}
},
/* StringTable */
${BEGIN} ${STRING_TABLE_VALUE}, /* ${STRING_TABLE_CNAME}_${STRING_TABLE_GUID} */
${END}
/* SizeTable */
{
${BEGIN} ${SIZE_TABLE_MAXIMUM_LENGTH}, ${SIZE_TABLE_CURRENT_LENGTH}, /* ${SIZE_TABLE_CNAME}_${SIZE_TABLE_GUID} */
${END}
},
${BEGIN} { ${INIT_VALUE_UINT16} }, /* ${INIT_CNAME_DECL_UINT16}_${INIT_GUID_DECL_UINT16}[${INIT_NUMSKUS_DECL_UINT16}] */
${END}
${BEGIN} ${VARDEF_VALUE_UINT16}, /* ${VARDEF_CNAME_UINT16}_${VARDEF_GUID_UINT16}_VariableDefault_${VARDEF_SKUID_UINT16} */
${END}
${BEGIN} { ${INIT_VALUE_UINT8} }, /* ${INIT_CNAME_DECL_UINT8}_${INIT_GUID_DECL_UINT8}[${INIT_NUMSKUS_DECL_UINT8}] */
${END}
${BEGIN} ${VARDEF_VALUE_UINT8}, /* ${VARDEF_CNAME_UINT8}_${VARDEF_GUID_UINT8}_VariableDefault_${VARDEF_SKUID_UINT8} */
${END}
${BEGIN} { ${INIT_VALUE_BOOLEAN} }, /* ${INIT_CNAME_DECL_BOOLEAN}_${INIT_GUID_DECL_BOOLEAN}[${INIT_NUMSKUS_DECL_BOOLEAN}] */
${END}
${BEGIN} ${VARDEF_VALUE_BOOLEAN}, /* ${VARDEF_CNAME_BOOLEAN}_${VARDEF_GUID_BOOLEAN}_VariableDefault_${VARDEF_SKUID_BOOLEAN} */
${END}
${SYSTEM_SKU_ID_VALUE}
};
#endif
""")
## Mapping between PCD driver type and EFI phase
gPcdPhaseMap = {
"PEI_PCD_DRIVER" : "PEI",
"DXE_PCD_DRIVER" : "DXE"
}
gPcdDatabaseAutoGenH = TemplateString("""
#define PCD_${PHASE}_SERVICE_DRIVER_VERSION ${SERVICE_DRIVER_VERSION}
//
// External PCD database debug information
//
#if 0
#define ${PHASE}_GUID_TABLE_SIZE ${GUID_TABLE_SIZE}
#define ${PHASE}_STRING_TABLE_SIZE ${STRING_TABLE_SIZE}
#define ${PHASE}_SKUID_TABLE_SIZE ${SKUID_TABLE_SIZE}
#define ${PHASE}_LOCAL_TOKEN_NUMBER_TABLE_SIZE ${LOCAL_TOKEN_NUMBER_TABLE_SIZE}
#define ${PHASE}_LOCAL_TOKEN_NUMBER ${LOCAL_TOKEN_NUMBER}
#define ${PHASE}_EXMAPPING_TABLE_SIZE ${EXMAPPING_TABLE_SIZE}
#define ${PHASE}_EX_TOKEN_NUMBER ${EX_TOKEN_NUMBER}
#define ${PHASE}_SIZE_TABLE_SIZE ${SIZE_TABLE_SIZE}
#define ${PHASE}_SKU_HEAD_SIZE ${SKU_HEAD_SIZE}
#define ${PHASE}_GUID_TABLE_EMPTY ${GUID_TABLE_EMPTY}
#define ${PHASE}_STRING_TABLE_EMPTY ${STRING_TABLE_EMPTY}
#define ${PHASE}_SKUID_TABLE_EMPTY ${SKUID_TABLE_EMPTY}
#define ${PHASE}_DATABASE_EMPTY ${DATABASE_EMPTY}
#define ${PHASE}_EXMAP_TABLE_EMPTY ${EXMAP_TABLE_EMPTY}
typedef struct {
UINT64 SkuIdTable[${PHASE}_SKUID_TABLE_SIZE];
${BEGIN} UINT64 ${INIT_CNAME_DECL_UINT64}_${INIT_GUID_DECL_UINT64}[${INIT_NUMSKUS_DECL_UINT64}];
${END}
${BEGIN} UINT64 ${VARDEF_CNAME_UINT64}_${VARDEF_GUID_UINT64}_VariableDefault_${VARDEF_SKUID_UINT64};
${END}
${BEGIN} UINT32 ${INIT_CNAME_DECL_UINT32}_${INIT_GUID_DECL_UINT32}[${INIT_NUMSKUS_DECL_UINT32}];
${END}
${BEGIN} UINT32 ${VARDEF_CNAME_UINT32}_${VARDEF_GUID_UINT32}_VariableDefault_${VARDEF_SKUID_UINT32};
${END}
${BEGIN} VPD_HEAD ${VPD_HEAD_CNAME_DECL}_${VPD_HEAD_GUID_DECL}[${VPD_HEAD_NUMSKUS_DECL}];
${END}
DYNAMICEX_MAPPING ExMapTable[${PHASE}_EXMAPPING_TABLE_SIZE];
UINT32 LocalTokenNumberTable[${PHASE}_LOCAL_TOKEN_NUMBER_TABLE_SIZE];
GUID GuidTable[${PHASE}_GUID_TABLE_SIZE];
${BEGIN} STRING_HEAD ${STRING_HEAD_CNAME_DECL}_${STRING_HEAD_GUID_DECL}[${STRING_HEAD_NUMSKUS_DECL}];
${END}
${BEGIN} VARIABLE_HEAD ${VARIABLE_HEAD_CNAME_DECL}_${VARIABLE_HEAD_GUID_DECL}_Variable_Header[${VARIABLE_HEAD_NUMSKUS_DECL}];
${END}
${BEGIN} SKU_HEAD SkuHead[${PHASE}_SKU_HEAD_SIZE];
${END}
${BEGIN} UINT8 StringTable${STRING_TABLE_INDEX}[${STRING_TABLE_LENGTH}]; /* ${STRING_TABLE_CNAME}_${STRING_TABLE_GUID} */
${END}
SIZE_INFO SizeTable[${PHASE}_SIZE_TABLE_SIZE];
${BEGIN} UINT16 ${INIT_CNAME_DECL_UINT16}_${INIT_GUID_DECL_UINT16}[${INIT_NUMSKUS_DECL_UINT16}];
${END}
${BEGIN} UINT16 ${VARDEF_CNAME_UINT16}_${VARDEF_GUID_UINT16}_VariableDefault_${VARDEF_SKUID_UINT16};
${END}
${BEGIN} UINT8 ${INIT_CNAME_DECL_UINT8}_${INIT_GUID_DECL_UINT8}[${INIT_NUMSKUS_DECL_UINT8}];
${END}
${BEGIN} UINT8 ${VARDEF_CNAME_UINT8}_${VARDEF_GUID_UINT8}_VariableDefault_${VARDEF_SKUID_UINT8};
${END}
${BEGIN} BOOLEAN ${INIT_CNAME_DECL_BOOLEAN}_${INIT_GUID_DECL_BOOLEAN}[${INIT_NUMSKUS_DECL_BOOLEAN}];
${END}
${BEGIN} BOOLEAN ${VARDEF_CNAME_BOOLEAN}_${VARDEF_GUID_BOOLEAN}_VariableDefault_${VARDEF_SKUID_BOOLEAN};
${END}
${SYSTEM_SKU_ID}
} ${PHASE}_PCD_DATABASE_INIT;
typedef struct {
${PCD_DATABASE_UNINIT_EMPTY}
${BEGIN} UINT64 ${UNINIT_CNAME_DECL_UINT64}_${UNINIT_GUID_DECL_UINT64}[${UNINIT_NUMSKUS_DECL_UINT64}];
${END}
${BEGIN} UINT32 ${UNINIT_CNAME_DECL_UINT32}_${UNINIT_GUID_DECL_UINT32}[${UNINIT_NUMSKUS_DECL_UINT32}];
${END}
${BEGIN} UINT16 ${UNINIT_CNAME_DECL_UINT16}_${UNINIT_GUID_DECL_UINT16}[${UNINIT_NUMSKUS_DECL_UINT16}];
${END}
${BEGIN} UINT8 ${UNINIT_CNAME_DECL_UINT8}_${UNINIT_GUID_DECL_UINT8}[${UNINIT_NUMSKUS_DECL_UINT8}];
${END}
${BEGIN} BOOLEAN ${UNINIT_CNAME_DECL_BOOLEAN}_${UNINIT_GUID_DECL_BOOLEAN}[${UNINIT_NUMSKUS_DECL_BOOLEAN}];
${END}
} ${PHASE}_PCD_DATABASE_UNINIT;
typedef struct {
//GUID Signature; // PcdDataBaseGuid
//UINT32 BuildVersion;
//UINT32 Length;
//SKU_ID SystemSkuId; // Current SkuId value.
//UINT32 UninitDataBaseSize;// Total size for PCD those default value with 0.
//TABLE_OFFSET LocalTokenNumberTableOffset;
//TABLE_OFFSET ExMapTableOffset;
//TABLE_OFFSET GuidTableOffset;
//TABLE_OFFSET StringTableOffset;
//TABLE_OFFSET SizeTableOffset;
//TABLE_OFFSET SkuIdTableOffset;
//TABLE_OFFSET PcdNameTableOffset;
//UINT16 LocalTokenCount; // LOCAL_TOKEN_NUMBER for all
//UINT16 ExTokenCount; // EX_TOKEN_NUMBER for DynamicEx
//UINT16 GuidTableCount; // The Number of Guid in GuidTable
//UINT8 Pad[2];
${PHASE}_PCD_DATABASE_INIT Init;
${PHASE}_PCD_DATABASE_UNINIT Uninit;
} ${PHASE}_PCD_DATABASE;
#define ${PHASE}_NEX_TOKEN_NUMBER (${PHASE}_LOCAL_TOKEN_NUMBER - ${PHASE}_EX_TOKEN_NUMBER)
#endif
""")
gEmptyPcdDatabaseAutoGenC = TemplateString("""
//
// External PCD database debug information
//
#if 0
${PHASE}_PCD_DATABASE_INIT g${PHASE}PcdDbInit = {
/* SkuIdTable */
{ 0 },
/* ExMapTable */
{
{0, 0, 0}
},
/* LocalTokenNumberTable */
{
0
},
/* GuidTable */
{
{0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}
},
/* StringTable */
{ 0 },
/* SkuHead */
{
0, 0
},
/* SizeTable */
{
0, 0
},
${SYSTEM_SKU_ID_VALUE}
};
#endif
""")
## PackGuid
#
# Pack the GUID value in C structure format into data array
#
# @param GuidStructureValue: The GUID value in C structure format
#
# @retval Buffer: a data array contains the Guid
#
def PackGuid(GuidStructureValue):
GuidString = GuidStructureStringToGuidString(GuidStructureValue)
Guid = GuidString.split('-')
Buffer = pack('=LHHBBBBBBBB',
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
return Buffer
def toHex(s):
lst = []
for ch in s:
hv = hex(ord(ch)).replace('0x', ' ')
if len(hv) == 1:
hv = '0'+hv
lst.append(hv)
if lst:
return reduce(lambda x,y:x+y, lst)
else:
return 'empty'
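# Example: toHex("AB") returns " 41 42" (each character becomes a space-prefixed
# hex byte) and toHex("") returns 'empty'. Because '0x' is replaced by a space,
# the len(hv) == 1 padding branch can never be taken.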
## DbItemList
#
# The class holds the PCD database items. ItemSize, if not zero, should match the item datum type in the C structure.
# When the structure is changed, remember to check the ItemSize and the related PackStr in PackData().
# RawDataList is the raw data that may need some kind of calculation or transformation;
# DataList corresponds to the data that needs to be written to the database. If DataList is not present, then RawDataList
# will be written to the database.
#
class DbItemList:
def __init__(self, ItemSize, DataList=None, RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
self.ItemSize = ItemSize
self.DataList = DataList
self.RawDataList = RawDataList
self.ListSize = 0
def GetInterOffset(self, Index):
Offset = 0
if self.ItemSize == 0:
#
# Variable length, need to calculate one by one
#
assert(Index < len(self.RawDataList))
for ItemIndex in xrange(Index):
Offset += len(self.RawDataList[ItemIndex])
else:
for Datas in self.RawDataList:
Offset = self.ItemSize * Index
return Offset
def GetListSize(self):
if self.ListSize:
return self.ListSize
if len(self.RawDataList) == 0:
self.ListSize = 0
return self.ListSize
if self.ItemSize == 0:
self.ListSize = self.GetInterOffset(len(self.RawDataList) - 1) + len(self.RawDataList[len(self.RawDataList)-1])
else:
self.ListSize = self.ItemSize * len(self.RawDataList)
return self.ListSize
def PackData(self):
if self.ItemSize == 8:
PackStr = "=Q"
elif self.ItemSize == 4:
PackStr = "=L"
elif self.ItemSize == 2:
PackStr = "=H"
elif self.ItemSize == 1:
PackStr = "=B"
elif self.ItemSize == 0:
PackStr = "=B"
elif self.ItemSize == 16:
# pack Guid
PackStr = ''
else:
# should not reach here
assert(False)
Buffer = ''
for Datas in self.RawDataList:
if type(Datas) in (list, tuple):
for Data in Datas:
if PackStr:
Buffer += pack(PackStr, GetIntegerValue(Data))
else:
Buffer += PackGuid(Data)
else:
if PackStr:
Buffer += pack(PackStr, GetIntegerValue(Datas))
else:
Buffer += PackGuid(Datas)
return Buffer
## DbExMapTblItemList
#
# The class holds the ExMap table
#
class DbExMapTblItemList (DbItemList):
def __init__(self, ItemSize, DataList=None, RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
DbItemList.__init__(self, ItemSize, DataList, RawDataList)
def PackData(self):
Buffer = ''
PackStr = "=LHH"
for Datas in self.RawDataList:
Buffer += pack(PackStr,
GetIntegerValue(Datas[0]),
GetIntegerValue(Datas[1]),
GetIntegerValue(Datas[2]))
return Buffer
## DbComItemList
#
# The DbComItemList is a special kind of DbItemList for the case where the size of the list cannot be computed as
# ItemSize multiplied by the item count.
#
class DbComItemList (DbItemList):
def __init__(self, ItemSize, DataList=None, RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
DbItemList.__init__(self, ItemSize, DataList, RawDataList)
def GetInterOffset(self, Index):
Offset = 0
if self.ItemSize == 0:
#
            # Variable length, need to calculate one by one.
            # The only variable-length table is the string table; it is not a composite item, so we should not reach here.
#
assert(False)
else:
assert(Index < len(self.RawDataList))
for ItemIndex in xrange(Index):
Offset += len(self.RawDataList[ItemIndex]) * self.ItemSize
return Offset
def GetListSize(self):
if self.ListSize:
return self.ListSize
if self.ItemSize == 0:
assert(False)
else:
if len(self.RawDataList) == 0:
self.ListSize = 0
else:
self.ListSize = self.GetInterOffset(len(self.RawDataList) - 1) + len(self.RawDataList[len(self.RawDataList)-1]) * self.ItemSize
return self.ListSize
def PackData(self):
if self.ItemSize == 8:
PackStr = "=Q"
elif self.ItemSize == 4:
PackStr = "=L"
elif self.ItemSize == 2:
PackStr = "=H"
elif self.ItemSize == 1:
PackStr = "=B"
elif self.ItemSize == 0:
PackStr = "=B"
else:
assert(False)
Buffer = ''
for DataList in self.RawDataList:
for Data in DataList:
if type(Data) in (list, tuple):
for SingleData in Data:
Buffer += pack(PackStr, GetIntegerValue(SingleData))
else:
Buffer += pack(PackStr, GetIntegerValue(Data))
return Buffer
## DbVariableTableItemList
#
# The class holds the Variable header value table
#
class DbVariableTableItemList (DbComItemList):
def __init__(self, ItemSize, DataList=None, RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
DbComItemList.__init__(self, ItemSize, DataList, RawDataList)
def PackData(self):
PackStr = "=LLHHLHH"
Buffer = ''
for DataList in self.RawDataList:
for Data in DataList:
Buffer += pack(PackStr,
GetIntegerValue(Data[0]),
GetIntegerValue(Data[1]),
GetIntegerValue(Data[2]),
GetIntegerValue(Data[3]),
GetIntegerValue(Data[4]),
GetIntegerValue(Data[5]),
GetIntegerValue(0))
return Buffer
class DbStringHeadTableItemList(DbItemList):
def __init__(self,ItemSize,DataList=None,RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
DbItemList.__init__(self, ItemSize, DataList, RawDataList)
def GetInterOffset(self, Index):
Offset = 0
if self.ItemSize == 0:
#
# Variable length, need to calculate one by one
#
assert(Index < len(self.RawDataList))
for ItemIndex in xrange(Index):
Offset += len(self.RawDataList[ItemIndex])
else:
for innerIndex in range(Index):
if type(self.RawDataList[innerIndex]) in (list, tuple):
Offset += len(self.RawDataList[innerIndex]) * self.ItemSize
else:
Offset += self.ItemSize
return Offset
def GetListSize(self):
if self.ListSize:
return self.ListSize
if len(self.RawDataList) == 0:
self.ListSize = 0
return self.ListSize
if self.ItemSize == 0:
self.ListSize = self.GetInterOffset(len(self.RawDataList) - 1) + len(self.RawDataList[len(self.RawDataList)-1])
else:
for Datas in self.RawDataList:
if type(Datas) in (list, tuple):
self.ListSize += len(Datas) * self.ItemSize
else:
self.ListSize += self.ItemSize
return self.ListSize
## DbSkuHeadTableItemList
#
# The class holds the Sku header value table
#
class DbSkuHeadTableItemList (DbItemList):
def __init__(self, ItemSize, DataList=None, RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
DbItemList.__init__(self, ItemSize, DataList, RawDataList)
def PackData(self):
PackStr = "=LL"
Buffer = ''
for Data in self.RawDataList:
Buffer += pack(PackStr,
GetIntegerValue(Data[0]),
GetIntegerValue(Data[1]))
return Buffer
## DbSizeTableItemList
#
# The class holds the size table
#
class DbSizeTableItemList (DbItemList):
def __init__(self, ItemSize, DataList=None, RawDataList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
DbItemList.__init__(self, ItemSize, DataList, RawDataList)
def GetListSize(self):
length = 0
for Data in self.RawDataList:
length += (1 + len(Data[1]))
return length * self.ItemSize
def PackData(self):
PackStr = "=H"
Buffer = ''
for Data in self.RawDataList:
Buffer += pack(PackStr,
GetIntegerValue(Data[0]))
for subData in Data[1]:
Buffer += pack(PackStr,
GetIntegerValue(subData))
return Buffer
## DbStringItemList
#
# The class holds the string table
#
class DbStringItemList (DbComItemList):
def __init__(self, ItemSize, DataList=None, RawDataList=None, LenList=None):
if DataList is None:
DataList = []
if RawDataList is None:
RawDataList = []
if LenList is None:
LenList = []
assert(len(RawDataList) == len(LenList))
DataList = []
# adjust DataList according to the LenList
for Index in xrange(len(RawDataList)):
Len = LenList[Index]
RawDatas = RawDataList[Index]
assert(Len >= len(RawDatas))
ActualDatas = []
for i in xrange(len(RawDatas)):
ActualDatas.append(RawDatas[i])
for i in xrange(len(RawDatas), Len):
ActualDatas.append(0)
DataList.append(ActualDatas)
self.LenList = LenList
DbComItemList.__init__(self, ItemSize, DataList, RawDataList)
def GetInterOffset(self, Index):
Offset = 0
assert(Index < len(self.LenList))
for ItemIndex in xrange(Index):
Offset += self.LenList[ItemIndex]
return Offset
def GetListSize(self):
if self.ListSize:
return self.ListSize
if len(self.LenList) == 0:
self.ListSize = 0
else:
self.ListSize = self.GetInterOffset(len(self.LenList) - 1) + self.LenList[len(self.LenList)-1]
return self.ListSize
def PackData(self):
self.RawDataList = self.DataList
return DbComItemList.PackData(self)
## Find the index in two lists where the items match the two keys respectively
#
# @param Key1 The key used to search the List1
# @param List1 The list that Key1 will be searched
# @param Key2 The key used to search the List2
# @param List2 The list that Key2 will be searched
#
# @retval Index The position inside the list where list1[Index] == Key1 and list2[Index] == Key2
#
def GetMatchedIndex(Key1, List1, Key2, List2):
StartPos = 0
while StartPos < len(List1):
Index = List1.index(Key1, StartPos)
if List2[Index] == Key2:
return Index
else:
StartPos = Index + 1
return -1
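# Example: GetMatchedIndex('a', ['a', 'b', 'a'], 2, [1, 0, 2]) returns 2 -- the
# first position at which List1 holds 'a' and List2 holds 2 at the same index.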
## Get the integer value from a string like "14U" or an integer like 2
#
# @param Input The object that may be either a integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if type(Input) in (int, long):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
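# Examples: GetIntegerValue(2) -> 2, GetIntegerValue("14U") -> 14,
# GetIntegerValue("0x20ULL") -> 32, GetIntegerValue("") -> 0.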
## convert StringArray like {0x36, 0x00, 0x34, 0x00, 0x21, 0x00, 0x36, 0x00, 0x34, 0x00, 0x00, 0x00}
# to List like [0x36, 0x00, 0x34, 0x00, 0x21, 0x00, 0x36, 0x00, 0x34, 0x00, 0x00, 0x00]
#
# @param StringArray A string array like {0x36, 0x00, 0x34, 0x00, 0x21, 0x00, 0x36, 0x00, 0x34, 0x00, 0x00, 0x00}
#
# @retval A list object of integer items
#
def StringArrayToList(StringArray):
StringArray = StringArray[1:-1]
StringArray = '[' + StringArray + ']'
return eval(StringArray)
## Convert TokenType String like "PCD_DATUM_TYPE_UINT32 | PCD_TYPE_HII" to TokenType value
#
# @param TokenType A TokenType string like "PCD_DATUM_TYPE_UINT32 | PCD_TYPE_HII"
#
# @retval A integer representation of the TokenType
#
def GetTokenTypeValue(TokenType):
TokenTypeDict = {
"PCD_TYPE_SHIFT":28,
"PCD_TYPE_DATA":(0x0 << 28),
"PCD_TYPE_HII":(0x8 << 28),
"PCD_TYPE_VPD":(0x4 << 28),
"PCD_TYPE_SKU_ENABLED":(0x2 << 28),
"PCD_TYPE_STRING":(0x1 << 28),
"PCD_DATUM_TYPE_SHIFT":24,
"PCD_DATUM_TYPE_POINTER":(0x0 << 24),
"PCD_DATUM_TYPE_UINT8":(0x1 << 24),
"PCD_DATUM_TYPE_UINT16":(0x2 << 24),
"PCD_DATUM_TYPE_UINT32":(0x4 << 24),
"PCD_DATUM_TYPE_UINT64":(0x8 << 24),
"PCD_DATUM_TYPE_SHIFT2":20,
"PCD_DATUM_TYPE_UINT8_BOOLEAN":(0x1 << 20 | 0x1 << 24),
}
return eval(TokenType, TokenTypeDict)
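# Example: GetTokenTypeValue("PCD_DATUM_TYPE_UINT32 | PCD_TYPE_HII") evaluates
# to (0x4 << 24) | (0x8 << 28) == 0x84000000.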
## construct the external Pcd database using data from Dict
#
# @param Dict A dictionary contains Pcd related tables
#
# @retval Buffer A byte stream of the Pcd database
#
def BuildExDataBase(Dict):
# init Db items
InitValueUint64 = Dict['INIT_DB_VALUE_UINT64']
DbInitValueUint64 = DbComItemList(8, RawDataList = InitValueUint64)
VardefValueUint64 = Dict['VARDEF_DB_VALUE_UINT64']
DbVardefValueUint64 = DbItemList(8, RawDataList = VardefValueUint64)
InitValueUint32 = Dict['INIT_DB_VALUE_UINT32']
DbInitValueUint32 = DbComItemList(4, RawDataList = InitValueUint32)
VardefValueUint32 = Dict['VARDEF_DB_VALUE_UINT32']
DbVardefValueUint32 = DbItemList(4, RawDataList = VardefValueUint32)
VpdHeadValue = Dict['VPD_DB_VALUE']
DbVpdHeadValue = DbComItemList(4, RawDataList = VpdHeadValue)
ExMapTable = zip(Dict['EXMAPPING_TABLE_EXTOKEN'], Dict['EXMAPPING_TABLE_LOCAL_TOKEN'], Dict['EXMAPPING_TABLE_GUID_INDEX'])
DbExMapTable = DbExMapTblItemList(8, RawDataList = ExMapTable)
LocalTokenNumberTable = Dict['LOCAL_TOKEN_NUMBER_DB_VALUE']
DbLocalTokenNumberTable = DbItemList(4, RawDataList = LocalTokenNumberTable)
GuidTable = Dict['GUID_STRUCTURE']
DbGuidTable = DbItemList(16, RawDataList = GuidTable)
StringHeadValue = Dict['STRING_DB_VALUE']
# DbItemList to DbStringHeadTableItemList
DbStringHeadValue = DbStringHeadTableItemList(4, RawDataList = StringHeadValue)
VariableTable = Dict['VARIABLE_DB_VALUE']
DbVariableTable = DbVariableTableItemList(20, RawDataList = VariableTable)
NumberOfSkuEnabledPcd = GetIntegerValue(Dict['SKU_HEAD_SIZE'])
Dict['SKUHEAD_TABLE_VALUE'] = [(0,0) for i in xrange(NumberOfSkuEnabledPcd)]
SkuTable = Dict['SKUHEAD_TABLE_VALUE'] # Generated later
DbSkuTable = DbSkuHeadTableItemList(8, RawDataList = SkuTable)
Dict['STRING_TABLE_DB_VALUE'] = [StringArrayToList(x) for x in Dict['STRING_TABLE_VALUE']]
StringTableValue = Dict['STRING_TABLE_DB_VALUE']
    # When calculating the offset, use StringTableLen instead of StringTableValue, as a string's maximum length may differ from its actual length.
StringTableLen = Dict['STRING_TABLE_LENGTH']
DbStringTableLen = DbStringItemList(0, RawDataList = StringTableValue, LenList = StringTableLen)
PcdTokenTable = Dict['PCD_TOKENSPACE']
PcdTokenLen = Dict['PCD_TOKENSPACE_LENGTH']
PcdTokenTableValue = [StringArrayToList(x) for x in Dict['PCD_TOKENSPACE']]
DbPcdTokenTable = DbStringItemList(0, RawDataList = PcdTokenTableValue, LenList = PcdTokenLen)
PcdCNameTable = Dict['PCD_CNAME']
PcdCNameLen = Dict['PCD_CNAME_LENGTH']
PcdCNameTableValue = [StringArrayToList(x) for x in Dict['PCD_CNAME']]
DbPcdCNameTable = DbStringItemList(0, RawDataList = PcdCNameTableValue, LenList = PcdCNameLen)
PcdNameOffsetTable = Dict['PCD_NAME_OFFSET']
DbPcdNameOffsetTable = DbItemList(4,RawDataList = PcdNameOffsetTable)
SizeTableValue = zip(Dict['SIZE_TABLE_MAXIMUM_LENGTH'], Dict['SIZE_TABLE_CURRENT_LENGTH'])
DbSizeTableValue = DbSizeTableItemList(2, RawDataList = SizeTableValue)
InitValueUint16 = Dict['INIT_DB_VALUE_UINT16']
DbInitValueUint16 = DbComItemList(2, RawDataList = InitValueUint16)
VardefValueUint16 = Dict['VARDEF_DB_VALUE_UINT16']
DbVardefValueUint16 = DbItemList(2, RawDataList = VardefValueUint16)
InitValueUint8 = Dict['INIT_DB_VALUE_UINT8']
DbInitValueUint8 = DbComItemList(1, RawDataList = InitValueUint8)
VardefValueUint8 = Dict['VARDEF_DB_VALUE_UINT8']
DbVardefValueUint8 = DbItemList(1, RawDataList = VardefValueUint8)
InitValueBoolean = Dict['INIT_DB_VALUE_BOOLEAN']
DbInitValueBoolean = DbComItemList(1, RawDataList = InitValueBoolean)
VardefValueBoolean = Dict['VARDEF_DB_VALUE_BOOLEAN']
DbVardefValueBoolean = DbItemList(1, RawDataList = VardefValueBoolean)
SkuidValue = Dict['SKUID_VALUE']
DbSkuidValue = DbItemList(8, RawDataList = SkuidValue)
SkuIndexValue = Dict['SKU_INDEX_VALUE']
DbSkuIndexValue = DbItemList(8,RawDataList = SkuIndexValue)
    # Uninit Db Items
UnInitValueUint64 = Dict['UNINIT_GUID_DECL_UINT64']
DbUnInitValueUint64 = DbItemList(8, RawDataList = UnInitValueUint64)
UnInitValueUint32 = Dict['UNINIT_GUID_DECL_UINT32']
DbUnInitValueUint32 = DbItemList(4, RawDataList = UnInitValueUint32)
UnInitValueUint16 = Dict['UNINIT_GUID_DECL_UINT16']
DbUnInitValueUint16 = DbItemList(2, RawDataList = UnInitValueUint16)
UnInitValueUint8 = Dict['UNINIT_GUID_DECL_UINT8']
DbUnInitValueUint8 = DbItemList(1, RawDataList = UnInitValueUint8)
UnInitValueBoolean = Dict['UNINIT_GUID_DECL_BOOLEAN']
DbUnInitValueBoolean = DbItemList(1, RawDataList = UnInitValueBoolean)
PcdTokenNumberMap = Dict['PCD_ORDER_TOKEN_NUMBER_MAP']
DbNameTotle = ["SkuidValue", "SkuIndexValue", "InitValueUint64", "VardefValueUint64", "InitValueUint32", "VardefValueUint32", "VpdHeadValue", "ExMapTable",
"LocalTokenNumberTable", "GuidTable", "StringHeadValue", "PcdNameOffsetTable","VariableTable","SkuTable", "StringTableLen", "PcdTokenTable", "PcdCNameTable",
"SizeTableValue", "InitValueUint16", "VardefValueUint16", "InitValueUint8", "VardefValueUint8", "InitValueBoolean",
"VardefValueBoolean", "UnInitValueUint64", "UnInitValueUint32", "UnInitValueUint16", "UnInitValueUint8", "UnInitValueBoolean"]
DbTotal = [SkuidValue, SkuIndexValue, InitValueUint64, VardefValueUint64, InitValueUint32, VardefValueUint32, VpdHeadValue, ExMapTable,
LocalTokenNumberTable, GuidTable, StringHeadValue, PcdNameOffsetTable,VariableTable,SkuTable, StringTableLen, PcdTokenTable,PcdCNameTable,
SizeTableValue, InitValueUint16, VardefValueUint16, InitValueUint8, VardefValueUint8, InitValueBoolean,
VardefValueBoolean, UnInitValueUint64, UnInitValueUint32, UnInitValueUint16, UnInitValueUint8, UnInitValueBoolean]
DbItemTotal = [DbSkuidValue, DbSkuIndexValue, DbInitValueUint64, DbVardefValueUint64, DbInitValueUint32, DbVardefValueUint32, DbVpdHeadValue, DbExMapTable,
DbLocalTokenNumberTable, DbGuidTable, DbStringHeadValue, DbPcdNameOffsetTable,DbVariableTable,DbSkuTable, DbStringTableLen, DbPcdTokenTable, DbPcdCNameTable,
DbSizeTableValue, DbInitValueUint16, DbVardefValueUint16, DbInitValueUint8, DbVardefValueUint8, DbInitValueBoolean,
DbVardefValueBoolean, DbUnInitValueUint64, DbUnInitValueUint32, DbUnInitValueUint16, DbUnInitValueUint8, DbUnInitValueBoolean]
# VardefValueBoolean is the last table in the init table items
InitTableNum = DbNameTotle.index("VardefValueBoolean") + 1
# The FixedHeader length of the PCD_DATABASE_INIT, from Signature to Pad
FixedHeaderLen = 72
# Get offset of SkuId table in the database
SkuIdTableOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is SkuidValue:
break
SkuIdTableOffset += DbItemTotal[DbIndex].GetListSize()
# Get offset of SkuValue table in the database
SkuTableOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is SkuTable:
break
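        # DbSkuIndexValue holds one variable-length SKU index list per SKU-enabled PCD,
        # so its size is ItemSize times the total number of items rather than GetListSize().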
elif DbItemTotal[DbIndex] is DbSkuIndexValue:
if DbItemTotal[DbIndex].RawDataList:
Count = 0
for item in DbItemTotal[DbIndex].RawDataList:
Count += len(item)
SkuTableOffset += DbItemTotal[DbIndex].ItemSize * Count
continue
SkuTableOffset += DbItemTotal[DbIndex].GetListSize()
# Fix up the LocalTokenNumberTable, SkuHeader table
SkuHeaderIndex = 0
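    # Build a running index into the flattened SKU_INDEX_VALUE table; each entry is
    # [count, SkuId...], so the next list starts count + 1 slots further on.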
if len(Dict['SKU_INDEX_VALUE']) > 0:
SkuIndexIndexTable = [(0) for i in xrange(len(Dict['SKU_INDEX_VALUE']))]
SkuIndexIndexTable[0] = 0 #Dict['SKU_INDEX_VALUE'][0][0]
for i in range(1,len(Dict['SKU_INDEX_VALUE'])):
SkuIndexIndexTable[i] = SkuIndexIndexTable[i-1]+Dict['SKU_INDEX_VALUE'][i-1][0] + 1
for (LocalTokenNumberTableIndex, (Offset, Table)) in enumerate(LocalTokenNumberTable):
DbIndex = 0
DbOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is Table:
DbOffset += DbItemTotal[DbIndex].GetInterOffset(Offset)
break
elif DbItemTotal[DbIndex] is DbSkuIndexValue:
if DbItemTotal[DbIndex].RawDataList:
Count = 0
for item in DbItemTotal[DbIndex].RawDataList:
Count += len(item)
DbOffset += DbItemTotal[DbIndex].ItemSize * Count
continue
DbOffset += DbItemTotal[DbIndex].GetListSize()
if DbIndex + 1 == InitTableNum:
if DbOffset % 8:
DbOffset += (8 - DbOffset % 8)
else:
assert(False)
TokenTypeValue = Dict['TOKEN_TYPE'][LocalTokenNumberTableIndex]
TokenTypeValue = GetTokenTypeValue(TokenTypeValue)
LocalTokenNumberTable[LocalTokenNumberTableIndex] = DbOffset|int(TokenTypeValue)
# if PCD_TYPE_SKU_ENABLED, then we need to fix up the SkuTable
SkuIndexTabalOffset = SkuIdTableOffset + len(Dict['SKUID_VALUE']) * 8
if (TokenTypeValue & (0x2 << 28)):
SkuTable[SkuHeaderIndex] = (DbOffset|int(TokenTypeValue & ~(0x2<<28)), SkuIndexTabalOffset + SkuIndexIndexTable[PcdTokenNumberMap[LocalTokenNumberTableIndex]] * 8)
LocalTokenNumberTable[LocalTokenNumberTableIndex] = (SkuTableOffset + SkuHeaderIndex * 8) | int(TokenTypeValue)
SkuHeaderIndex += 1
if SkuHeaderIndex == 0:
SkuHeaderIndex = 1
assert(SkuHeaderIndex == NumberOfSkuEnabledPcd)
# resolve variable table offset
for VariableEntries in VariableTable:
skuindex = 0
for VariableEntryPerSku in VariableEntries:
(VariableHeadGuidIndex, VariableHeadStringIndex, SKUVariableOffset, VariableOffset, VariableRefTable, VariableAttribute) = VariableEntryPerSku[:]
DbIndex = 0
DbOffset = FixedHeaderLen
for DbIndex in xrange(len(DbTotal)):
if DbTotal[DbIndex] is VariableRefTable:
DbOffset += DbItemTotal[DbIndex].GetInterOffset(VariableOffset)
break
elif DbItemTotal[DbIndex] is DbSkuIndexValue:
if DbItemTotal[DbIndex].RawDataList:
Count = 0
for item in DbItemTotal[DbIndex].RawDataList:
Count += len(item)
DbOffset += DbItemTotal[DbIndex].ItemSize * Count
continue
DbOffset += DbItemTotal[DbIndex].GetListSize()
if DbIndex + 1 == InitTableNum:
if DbOffset % 8:
DbOffset += (8 - DbOffset % 8)
else:
assert(False)
if isinstance(VariableRefTable[0],list):
DbOffset += skuindex * 4
skuindex += 1
if DbIndex >= InitTableNum:
assert(False)
VarAttr, VarProp = VariableAttributes.GetVarAttributes(VariableAttribute)
VariableEntryPerSku[:] = (VariableHeadStringIndex, DbOffset, VariableHeadGuidIndex, SKUVariableOffset, VarAttr, VarProp)
# calculate various table offset now
DbTotalLength = FixedHeaderLen
for DbIndex in xrange(len(DbItemTotal)):
if DbItemTotal[DbIndex] is DbLocalTokenNumberTable:
LocalTokenNumberTableOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbExMapTable:
ExMapTableOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbGuidTable:
GuidTableOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbStringTableLen:
StringTableOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbSizeTableValue:
SizeTableOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbSkuidValue:
SkuIdTableOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbPcdNameOffsetTable:
DbPcdNameOffset = DbTotalLength
elif DbItemTotal[DbIndex] is DbSkuIndexValue:
if DbItemTotal[DbIndex].RawDataList:
Count = 0
for item in DbItemTotal[DbIndex].RawDataList:
Count += len(item)
DbTotalLength += DbItemTotal[DbIndex].ItemSize * Count
continue
DbTotalLength += DbItemTotal[DbIndex].GetListSize()
if not Dict['PCD_INFO_FLAG']:
DbPcdNameOffset = 0
LocalTokenCount = GetIntegerValue(Dict['LOCAL_TOKEN_NUMBER'])
ExTokenCount = GetIntegerValue(Dict['EX_TOKEN_NUMBER'])
GuidTableCount = GetIntegerValue(Dict['GUID_TABLE_SIZE'])
SystemSkuId = GetIntegerValue(Dict['SYSTEM_SKU_ID_VALUE'])
Pad = 0xDA
UninitDataBaseSize = 0
for Item in (DbUnInitValueUint64, DbUnInitValueUint32, DbUnInitValueUint16, DbUnInitValueUint8, DbUnInitValueBoolean):
UninitDataBaseSize += Item.GetListSize()
if (DbTotalLength - UninitDataBaseSize) % 8:
DbTotalLength += (8 - (DbTotalLength - UninitDataBaseSize) % 8)
# Construct the database buffer
Guid = "{0x3c7d193c, 0x682c, 0x4c14, 0xa6, 0x8f, 0x55, 0x2d, 0xea, 0x4f, 0x43, 0x7e}"
Guid = StringArrayToList(Guid)
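    # Pack the 72-byte fixed header (FixedHeaderLen): signature GUID, build version,
    # initialized-database length, SystemSkuId, uninitialized size, seven table offsets,
    # three token/GUID counts, and two pad bytes.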
Buffer = pack('=LHHBBBBBBBB',
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
b = pack("=L", DATABASE_VERSION)
Buffer += b
b = pack('=L', DbTotalLength - UninitDataBaseSize)
Buffer += b
b = pack('=Q', SystemSkuId)
Buffer += b
b = pack('=L', UninitDataBaseSize)
Buffer += b
b = pack('=L', LocalTokenNumberTableOffset)
Buffer += b
b = pack('=L', ExMapTableOffset)
Buffer += b
b = pack('=L', GuidTableOffset)
Buffer += b
b = pack('=L', StringTableOffset)
Buffer += b
b = pack('=L', SizeTableOffset)
Buffer += b
b = pack('=L', SkuIdTableOffset)
Buffer += b
b = pack('=L', DbPcdNameOffset)
Buffer += b
b = pack('=H', LocalTokenCount)
Buffer += b
b = pack('=H', ExTokenCount)
Buffer += b
b = pack('=H', GuidTableCount)
Buffer += b
b = pack('=B', Pad)
Buffer += b
b = pack('=B', Pad)
Buffer += b
Index = 0
for Item in DbItemTotal:
Index +=1
b = Item.PackData()
Buffer += b
if Index == InitTableNum:
if len(Buffer) % 8:
for num in range(8 - len(Buffer) % 8):
b = pack('=B', Pad)
Buffer += b
break
return Buffer
## Create code for PCD database
#
# @param Info The ModuleAutoGen object
# @param AutoGenC The TemplateString object for C code
# @param AutoGenH The TemplateString object for header file
#
def CreatePcdDatabaseCode (Info, AutoGenC, AutoGenH):
if Info.PcdIsDriver == "":
return
if Info.PcdIsDriver not in gPcdPhaseMap:
EdkLogger.error("build", AUTOGEN_ERROR, "Not supported PcdIsDriver type:%s" % Info.PcdIsDriver,
ExtraData="[%s]" % str(Info))
AdditionalAutoGenH, AdditionalAutoGenC, PcdDbBuffer = CreatePcdDatabasePhaseSpecificAutoGen (Info.PlatformInfo, 'PEI')
AutoGenH.Append(AdditionalAutoGenH.String)
Phase = gPcdPhaseMap[Info.PcdIsDriver]
if Phase == 'PEI':
AutoGenC.Append(AdditionalAutoGenC.String)
if Phase == 'DXE':
AdditionalAutoGenH, AdditionalAutoGenC, PcdDbBuffer = CreatePcdDatabasePhaseSpecificAutoGen (Info.PlatformInfo, Phase)
AutoGenH.Append(AdditionalAutoGenH.String)
AutoGenC.Append(AdditionalAutoGenC.String)
if Info.IsBinaryModule:
DbFileName = os.path.join(Info.PlatformInfo.BuildDir, "FV", Phase + "PcdDataBase.raw")
else:
DbFileName = os.path.join(Info.OutputDir, Phase + "PcdDataBase.raw")
DbFile = StringIO()
DbFile.write(PcdDbBuffer)
Changed = SaveFileOnChange(DbFileName, DbFile.getvalue(), True)
## Create PCD database in DXE or PEI phase
#
# @param Platform The platform object
# @retval tuple Two TemplateString objects for C code and header file,
# respectively
#
def CreatePcdDatabasePhaseSpecificAutoGen (Platform, Phase):
AutoGenC = TemplateString()
AutoGenH = TemplateString()
Dict = {
'PHASE' : Phase,
'SERVICE_DRIVER_VERSION' : DATABASE_VERSION,
'GUID_TABLE_SIZE' : '1U',
'STRING_TABLE_SIZE' : '1U',
'SKUID_TABLE_SIZE' : '1U',
'LOCAL_TOKEN_NUMBER_TABLE_SIZE' : '0U',
'LOCAL_TOKEN_NUMBER' : '0U',
'EXMAPPING_TABLE_SIZE' : '1U',
'EX_TOKEN_NUMBER' : '0U',
'SIZE_TABLE_SIZE' : '2U',
'SKU_HEAD_SIZE' : '1U',
'GUID_TABLE_EMPTY' : 'TRUE',
'STRING_TABLE_EMPTY' : 'TRUE',
'SKUID_TABLE_EMPTY' : 'TRUE',
'DATABASE_EMPTY' : 'TRUE',
'EXMAP_TABLE_EMPTY' : 'TRUE',
        'PCD_DATABASE_UNINIT_EMPTY' : ' UINT8 dummy; /* PCD_DATABASE_UNINIT is empty */',
'SYSTEM_SKU_ID' : ' SKU_ID SystemSkuId;',
'SYSTEM_SKU_ID_VALUE' : '0U'
}
SkuObj = SkuClass(Platform.Platform.AvilableSkuIds, Platform.Platform.SkuIds)
Dict['SYSTEM_SKU_ID_VALUE'] = Platform.Platform.SkuIds[SkuObj.SystemSkuId]
Dict['PCD_INFO_FLAG'] = Platform.Platform.PcdInfoFlag
for DatumType in ['UINT64','UINT32','UINT16','UINT8','BOOLEAN', "VOID*"]:
Dict['VARDEF_CNAME_' + DatumType] = []
Dict['VARDEF_GUID_' + DatumType] = []
Dict['VARDEF_SKUID_' + DatumType] = []
Dict['VARDEF_VALUE_' + DatumType] = []
Dict['VARDEF_DB_VALUE_' + DatumType] = []
for Init in ['INIT','UNINIT']:
Dict[Init+'_CNAME_DECL_' + DatumType] = []
Dict[Init+'_GUID_DECL_' + DatumType] = []
Dict[Init+'_NUMSKUS_DECL_' + DatumType] = []
Dict[Init+'_VALUE_' + DatumType] = []
Dict[Init+'_DB_VALUE_'+DatumType] = []
for Type in ['STRING_HEAD','VPD_HEAD','VARIABLE_HEAD']:
Dict[Type + '_CNAME_DECL'] = []
Dict[Type + '_GUID_DECL'] = []
Dict[Type + '_NUMSKUS_DECL'] = []
Dict[Type + '_VALUE'] = []
Dict['STRING_DB_VALUE'] = []
Dict['VPD_DB_VALUE'] = []
Dict['VARIABLE_DB_VALUE'] = []
Dict['STRING_TABLE_INDEX'] = []
Dict['STRING_TABLE_LENGTH'] = []
Dict['STRING_TABLE_CNAME'] = []
Dict['STRING_TABLE_GUID'] = []
Dict['STRING_TABLE_VALUE'] = []
Dict['STRING_TABLE_DB_VALUE'] = []
Dict['SIZE_TABLE_CNAME'] = []
Dict['SIZE_TABLE_GUID'] = []
Dict['SIZE_TABLE_CURRENT_LENGTH'] = []
Dict['SIZE_TABLE_MAXIMUM_LENGTH'] = []
Dict['EXMAPPING_TABLE_EXTOKEN'] = []
Dict['EXMAPPING_TABLE_LOCAL_TOKEN'] = []
Dict['EXMAPPING_TABLE_GUID_INDEX'] = []
Dict['GUID_STRUCTURE'] = []
Dict['SKUID_VALUE'] = [0] # init Dict length
Dict['VARDEF_HEADER'] = []
Dict['LOCAL_TOKEN_NUMBER_DB_VALUE'] = []
Dict['VARIABLE_DB_VALUE'] = []
Dict['SKUHEAD_TABLE_VALUE'] = []
Dict['SKU_INDEX_VALUE'] = []
Dict['PCD_TOKENSPACE'] = []
Dict['PCD_CNAME'] = []
Dict['PCD_TOKENSPACE_LENGTH'] = []
Dict['PCD_CNAME_LENGTH'] = []
Dict['PCD_TOKENSPACE_OFFSET'] = []
Dict['PCD_CNAME_OFFSET'] = []
Dict['PCD_TOKENSPACE_MAP'] = []
Dict['PCD_NAME_OFFSET'] = []
Dict['PCD_ORDER_TOKEN_NUMBER_MAP'] = {}
PCD_STRING_INDEX_MAP = {}
StringTableIndex = 0
StringTableSize = 0
NumberOfLocalTokens = 0
NumberOfPeiLocalTokens = 0
NumberOfDxeLocalTokens = 0
NumberOfExTokens = 0
NumberOfSizeItems = 0
NumberOfSkuEnabledPcd = 0
GuidList = []
VarCheckTab = VAR_CHECK_PCD_VARIABLE_TAB_CONTAINER()
i = 0
ReorderedDynPcdList = GetOrderedDynamicPcdList(Platform.DynamicPcdList, Platform.PcdTokenNumber)
for Pcd in ReorderedDynPcdList:
VoidStarTypeCurrSize = []
i += 1
CName = Pcd.TokenCName
TokenSpaceGuidCName = Pcd.TokenSpaceGuidCName
for PcdItem in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
CName = PcdItem[0]
if GlobalData.BuildOptionPcd:
for PcdItem in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, CName) == (PcdItem[0], PcdItem[1]):
Pcd.DefaultValue = PcdItem[2]
break
EdkLogger.debug(EdkLogger.DEBUG_3, "PCD: %s %s (%s : %s)" % (CName, TokenSpaceGuidCName, Pcd.Phase, Phase))
if Pcd.Phase == 'PEI':
NumberOfPeiLocalTokens += 1
if Pcd.Phase == 'DXE':
NumberOfDxeLocalTokens += 1
if Pcd.Phase != Phase:
continue
#
# TODO: need GetGuidValue() definition
#
TokenSpaceGuidStructure = Pcd.TokenSpaceGuidValue
TokenSpaceGuid = GuidStructureStringToGuidValueName(TokenSpaceGuidStructure)
if Pcd.Type in gDynamicExPcd:
if TokenSpaceGuid not in GuidList:
GuidList += [TokenSpaceGuid]
Dict['GUID_STRUCTURE'].append(TokenSpaceGuidStructure)
NumberOfExTokens += 1
ValueList = []
DbValueList = []
StringHeadOffsetList = []
StringDbOffsetList = []
VpdHeadOffsetList = []
VpdDbOffsetList = []
VariableHeadValueList = []
VariableDbValueList = []
Pcd.InitString = 'UNINIT'
if Pcd.DatumType == 'VOID*':
if Pcd.Type not in ["DynamicVpd", "DynamicExVpd"]:
Pcd.TokenTypeList = ['PCD_TYPE_STRING']
else:
Pcd.TokenTypeList = []
elif Pcd.DatumType == 'BOOLEAN':
Pcd.TokenTypeList = ['PCD_DATUM_TYPE_UINT8_BOOLEAN']
else:
Pcd.TokenTypeList = ['PCD_DATUM_TYPE_' + Pcd.DatumType]
if len(Pcd.SkuInfoList) > 1:
Pcd.TokenTypeList += ['PCD_TYPE_SKU_ENABLED']
NumberOfSkuEnabledPcd += 1
SkuIndexTableTmp = []
SkuIndexTableTmp.append(0)
SkuIdIndex = 1
VariableHeadList = []
for SkuName in Pcd.SkuInfoList:
Sku = Pcd.SkuInfoList[SkuName]
SkuId = Sku.SkuId
if SkuId == None or SkuId == '':
continue
if (SkuId + 'U') not in Dict['SKUID_VALUE']:
Dict['SKUID_VALUE'].append(SkuId + 'U')
SkuIndexTableTmp.append(SkuId+'U')
SkuIdIndex += 1
if len(Sku.VariableName) > 0:
VariableGuidStructure = Sku.VariableGuidValue
VariableGuid = GuidStructureStringToGuidValueName(VariableGuidStructure)
if Platform.Platform.VarCheckFlag:
var_check_obj = VAR_CHECK_PCD_VARIABLE_TAB(VariableGuidStructure, StringToArray(Sku.VariableName))
try:
var_check_obj.push_back(VAR_VALID_OBJECT_FACTORY.Get_valid_object(Pcd, Sku.VariableOffset))
VarAttr, _ = VariableAttributes.GetVarAttributes(Sku.VariableAttribute)
var_check_obj.SetAttributes(VarAttr)
var_check_obj.UpdateSize()
VarCheckTab.push_back(var_check_obj)
except Exception:
ValidInfo = ''
if Pcd.validateranges:
ValidInfo = Pcd.validateranges[0]
if Pcd.validlists:
ValidInfo = Pcd.validlists[0]
if ValidInfo:
EdkLogger.error("build", PCD_VALIDATION_INFO_ERROR,
"The PCD '%s.%s' Validation information defined in DEC file has incorrect format." % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
ExtraData = "[%s]" % str(ValidInfo))
else:
EdkLogger.error("build", PCD_VALIDATION_INFO_ERROR,
"The PCD '%s.%s' Validation information defined in DEC file has incorrect format." % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
Pcd.TokenTypeList += ['PCD_TYPE_HII']
Pcd.InitString = 'INIT'
                # Store all variable names of one HII PCD under different SKUs in the string table
                # and calculate the VariableHeadStringIndex
if SkuIdIndex - 2 == 0:
for SkuName2 in Pcd.SkuInfoList:
SkuInfo = Pcd.SkuInfoList[SkuName2]
if SkuInfo.SkuId == None or SkuInfo.SkuId == '':
continue
VariableNameStructure = StringToArray(SkuInfo.VariableName)
if VariableNameStructure not in Dict['STRING_TABLE_VALUE']:
Dict['STRING_TABLE_CNAME'].append(CName)
Dict['STRING_TABLE_GUID'].append(TokenSpaceGuid)
if StringTableIndex == 0:
Dict['STRING_TABLE_INDEX'].append('')
else:
Dict['STRING_TABLE_INDEX'].append('_%d' % StringTableIndex)
VarNameSize = len(VariableNameStructure.replace(',',' ').split())
Dict['STRING_TABLE_LENGTH'].append(VarNameSize )
Dict['STRING_TABLE_VALUE'].append(VariableNameStructure)
StringHeadOffsetList.append(str(StringTableSize) + 'U')
VarStringDbOffsetList = []
VarStringDbOffsetList.append(StringTableSize)
Dict['STRING_DB_VALUE'].append(VarStringDbOffsetList)
StringTableIndex += 1
StringTableSize += len(VariableNameStructure.replace(',',' ').split())
VariableHeadStringIndex = 0
for Index in range(Dict['STRING_TABLE_VALUE'].index(VariableNameStructure)):
VariableHeadStringIndex += Dict['STRING_TABLE_LENGTH'][Index]
VariableHeadList.append(VariableHeadStringIndex)
VariableHeadStringIndex = VariableHeadList[SkuIdIndex - 2]
# store VariableGuid to GuidTable and get the VariableHeadGuidIndex
if VariableGuid not in GuidList:
GuidList += [VariableGuid]
Dict['GUID_STRUCTURE'].append(VariableGuidStructure)
VariableHeadGuidIndex = GuidList.index(VariableGuid)
if "PCD_TYPE_STRING" in Pcd.TokenTypeList:
VariableHeadValueList.append('%dU, offsetof(%s_PCD_DATABASE, Init.%s_%s), %dU, %sU' %
(VariableHeadStringIndex, Phase, CName, TokenSpaceGuid,
VariableHeadGuidIndex, Sku.VariableOffset))
else:
VariableHeadValueList.append('%dU, offsetof(%s_PCD_DATABASE, Init.%s_%s_VariableDefault_%s), %dU, %sU' %
(VariableHeadStringIndex, Phase, CName, TokenSpaceGuid, SkuIdIndex,
VariableHeadGuidIndex, Sku.VariableOffset))
Dict['VARDEF_CNAME_'+Pcd.DatumType].append(CName)
Dict['VARDEF_GUID_'+Pcd.DatumType].append(TokenSpaceGuid)
Dict['VARDEF_SKUID_'+Pcd.DatumType].append(SkuIdIndex)
if "PCD_TYPE_STRING" in Pcd.TokenTypeList:
Dict['VARDEF_VALUE_' + Pcd.DatumType].append("%s_%s[%d]" % (Pcd.TokenCName, TokenSpaceGuid, SkuIdIndex))
else:
#
                    # ULL (for UINT64) or U (for other integer types) should be appended to avoid
                    # warnings under the Linux build environment.
#
Dict['VARDEF_DB_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue)
if Pcd.DatumType == "UINT64":
Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue + "ULL")
elif Pcd.DatumType in ("UINT32", "UINT16", "UINT8"):
Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue + "U")
elif Pcd.DatumType == "BOOLEAN":
if eval(Sku.HiiDefaultValue) in [1,0]:
Dict['VARDEF_VALUE_'+Pcd.DatumType].append(str(eval(Sku.HiiDefaultValue)) + "U")
else:
Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue)
# construct the VariableHeader value
if "PCD_TYPE_STRING" in Pcd.TokenTypeList:
VariableHeadValueList.append('%dU, %dU, %sU, offsetof(%s_PCD_DATABASE, Init.%s_%s)' %
(VariableHeadGuidIndex, VariableHeadStringIndex, Sku.VariableOffset,
Phase, CName, TokenSpaceGuid))
# the Pcd default value will be filled later on
VariableOffset = len(Dict['STRING_DB_VALUE'])
VariableRefTable = Dict['STRING_DB_VALUE']
else:
VariableHeadValueList.append('%dU, %dU, %sU, offsetof(%s_PCD_DATABASE, Init.%s_%s_VariableDefault_%s)' %
(VariableHeadGuidIndex, VariableHeadStringIndex, Sku.VariableOffset,
Phase, CName, TokenSpaceGuid, SkuIdIndex))
# the Pcd default value was filled before
VariableOffset = len(Dict['VARDEF_DB_VALUE_' + Pcd.DatumType]) - 1
VariableRefTable = Dict['VARDEF_DB_VALUE_' + Pcd.DatumType]
VariableDbValueList.append([VariableHeadGuidIndex, VariableHeadStringIndex, Sku.VariableOffset, VariableOffset, VariableRefTable, Sku.VariableAttribute])
elif Sku.VpdOffset != '':
Pcd.TokenTypeList += ['PCD_TYPE_VPD']
Pcd.InitString = 'INIT'
VpdHeadOffsetList.append(str(Sku.VpdOffset) + 'U')
VpdDbOffsetList.append(Sku.VpdOffset)
# Also add the VOID* string of VPD PCD to SizeTable
if Pcd.DatumType == 'VOID*':
NumberOfSizeItems += 1
# For VPD type of PCD, its current size is equal to its MAX size.
VoidStarTypeCurrSize = [str(Pcd.MaxDatumSize) + 'U']
continue
if Pcd.DatumType == 'VOID*':
Pcd.TokenTypeList += ['PCD_TYPE_STRING']
Pcd.InitString = 'INIT'
if Sku.HiiDefaultValue != '' and Sku.DefaultValue == '':
Sku.DefaultValue = Sku.HiiDefaultValue
if Sku.DefaultValue != '':
NumberOfSizeItems += 1
Dict['STRING_TABLE_CNAME'].append(CName)
Dict['STRING_TABLE_GUID'].append(TokenSpaceGuid)
if StringTableIndex == 0:
Dict['STRING_TABLE_INDEX'].append('')
else:
Dict['STRING_TABLE_INDEX'].append('_%d' % StringTableIndex)
if Sku.DefaultValue[0] == 'L':
DefaultValueBinStructure = StringToArray(Sku.DefaultValue)
Size = len(DefaultValueBinStructure.replace(',',' ').split())
Dict['STRING_TABLE_VALUE'].append(DefaultValueBinStructure)
elif Sku.DefaultValue[0] == '"':
DefaultValueBinStructure = StringToArray(Sku.DefaultValue)
Size = len(Sku.DefaultValue) - 2 + 1
Dict['STRING_TABLE_VALUE'].append(DefaultValueBinStructure)
elif Sku.DefaultValue[0] == '{':
DefaultValueBinStructure = StringToArray(Sku.DefaultValue)
Size = len(Sku.DefaultValue.split(","))
Dict['STRING_TABLE_VALUE'].append(DefaultValueBinStructure)
StringHeadOffsetList.append(str(StringTableSize) + 'U')
StringDbOffsetList.append(StringTableSize)
if Pcd.MaxDatumSize != '':
MaxDatumSize = int(Pcd.MaxDatumSize, 0)
if MaxDatumSize < Size:
EdkLogger.error("build", AUTOGEN_ERROR,
"The maximum size of VOID* type PCD '%s.%s' is less than its actual size occupied." % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
ExtraData="[%s]" % str(Platform))
else:
MaxDatumSize = Size
StringTabLen = MaxDatumSize
if StringTabLen % 2:
StringTabLen += 1
if Sku.VpdOffset == '':
VoidStarTypeCurrSize.append(str(Size) + 'U')
Dict['STRING_TABLE_LENGTH'].append(StringTabLen)
StringTableIndex += 1
StringTableSize += (StringTabLen)
else:
if "PCD_TYPE_HII" not in Pcd.TokenTypeList:
Pcd.TokenTypeList += ['PCD_TYPE_DATA']
if Sku.DefaultValue == 'TRUE':
Pcd.InitString = 'INIT'
else:
if int(Sku.DefaultValue, 0) != 0:
Pcd.InitString = 'INIT'
#
                # For a UINT64 type PCD's value, ULL should be appended to avoid
                # warnings under the Linux build environment.
#
if Pcd.DatumType == "UINT64":
ValueList.append(Sku.DefaultValue + "ULL")
elif Pcd.DatumType in ("UINT32", "UINT16", "UINT8"):
ValueList.append(Sku.DefaultValue + "U")
elif Pcd.DatumType == "BOOLEAN":
if Sku.DefaultValue in ["1", "0"]:
ValueList.append(Sku.DefaultValue + "U")
else:
ValueList.append(Sku.DefaultValue)
DbValueList.append(Sku.DefaultValue)
Pcd.TokenTypeList = list(set(Pcd.TokenTypeList))
if Pcd.DatumType == 'VOID*':
Dict['SIZE_TABLE_CNAME'].append(CName)
Dict['SIZE_TABLE_GUID'].append(TokenSpaceGuid)
Dict['SIZE_TABLE_MAXIMUM_LENGTH'].append(str(Pcd.MaxDatumSize) + 'U')
Dict['SIZE_TABLE_CURRENT_LENGTH'].append(VoidStarTypeCurrSize)
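        # The first element of each SKU index list records how many SkuIds follow it.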
SkuIndexTableTmp[0] = len(SkuIndexTableTmp) - 1
if len(Pcd.SkuInfoList) > 1:
Dict['SKU_INDEX_VALUE'].append(SkuIndexTableTmp)
if 'PCD_TYPE_HII' in Pcd.TokenTypeList:
Dict['VARIABLE_HEAD_CNAME_DECL'].append(CName)
Dict['VARIABLE_HEAD_GUID_DECL'].append(TokenSpaceGuid)
Dict['VARIABLE_HEAD_NUMSKUS_DECL'].append(len(Pcd.SkuInfoList))
Dict['VARIABLE_HEAD_VALUE'].append('{ %s }\n' % ' },\n { '.join(VariableHeadValueList))
Dict['VARDEF_HEADER'].append('_Variable_Header')
Dict['VARIABLE_DB_VALUE'].append(VariableDbValueList)
else:
Dict['VARDEF_HEADER'].append('')
if 'PCD_TYPE_VPD' in Pcd.TokenTypeList:
Dict['VPD_HEAD_CNAME_DECL'].append(CName)
Dict['VPD_HEAD_GUID_DECL'].append(TokenSpaceGuid)
Dict['VPD_HEAD_NUMSKUS_DECL'].append(len(Pcd.SkuInfoList))
Dict['VPD_HEAD_VALUE'].append('{ %s }' % ' }, { '.join(VpdHeadOffsetList))
Dict['VPD_DB_VALUE'].append(VpdDbOffsetList)
if 'PCD_TYPE_STRING' in Pcd.TokenTypeList:
Dict['STRING_HEAD_CNAME_DECL'].append(CName)
Dict['STRING_HEAD_GUID_DECL'].append(TokenSpaceGuid)
Dict['STRING_HEAD_NUMSKUS_DECL'].append(len(Pcd.SkuInfoList))
Dict['STRING_HEAD_VALUE'].append(', '.join(StringHeadOffsetList))
Dict['STRING_DB_VALUE'].append(StringDbOffsetList)
PCD_STRING_INDEX_MAP[len(Dict['STRING_HEAD_CNAME_DECL']) -1 ] = len(Dict['STRING_DB_VALUE']) -1
if 'PCD_TYPE_DATA' in Pcd.TokenTypeList:
Dict[Pcd.InitString+'_CNAME_DECL_'+Pcd.DatumType].append(CName)
Dict[Pcd.InitString+'_GUID_DECL_'+Pcd.DatumType].append(TokenSpaceGuid)
Dict[Pcd.InitString+'_NUMSKUS_DECL_'+Pcd.DatumType].append(len(Pcd.SkuInfoList))
if Pcd.InitString == 'UNINIT':
Dict['PCD_DATABASE_UNINIT_EMPTY'] = ''
else:
Dict[Pcd.InitString+'_VALUE_'+Pcd.DatumType].append(', '.join(ValueList))
Dict[Pcd.InitString+'_DB_VALUE_'+Pcd.DatumType].append(DbValueList)
if Phase == 'PEI':
NumberOfLocalTokens = NumberOfPeiLocalTokens
if Phase == 'DXE':
NumberOfLocalTokens = NumberOfDxeLocalTokens
Dict['TOKEN_INIT'] = ['' for x in range(NumberOfLocalTokens)]
Dict['TOKEN_CNAME'] = ['' for x in range(NumberOfLocalTokens)]
Dict['TOKEN_GUID'] = ['' for x in range(NumberOfLocalTokens)]
Dict['TOKEN_TYPE'] = ['' for x in range(NumberOfLocalTokens)]
Dict['LOCAL_TOKEN_NUMBER_DB_VALUE'] = ['' for x in range(NumberOfLocalTokens)]
Dict['PCD_CNAME'] = ['' for x in range(NumberOfLocalTokens)]
Dict['PCD_TOKENSPACE_MAP'] = ['' for x in range(NumberOfLocalTokens)]
Dict['PCD_CNAME_LENGTH'] = [0 for x in range(NumberOfLocalTokens)]
SkuEnablePcdIndex = 0
for Pcd in ReorderedDynPcdList:
CName = Pcd.TokenCName
TokenSpaceGuidCName = Pcd.TokenSpaceGuidCName
if Pcd.Phase != Phase:
continue
TokenSpaceGuid = GuidStructureStringToGuidValueName(Pcd.TokenSpaceGuidValue) #(Platform.PackageList, TokenSpaceGuidCName))
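        # PcdTokenNumber is 1-based; convert it to a 0-based table index (and make it
        # relative to the DXE-only tables when generating the DXE phase database).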
GeneratedTokenNumber = Platform.PcdTokenNumber[CName, TokenSpaceGuidCName] - 1
if Phase == 'DXE':
GeneratedTokenNumber -= NumberOfPeiLocalTokens
if len(Pcd.SkuInfoList) > 1:
Dict['PCD_ORDER_TOKEN_NUMBER_MAP'][GeneratedTokenNumber] = SkuEnablePcdIndex
SkuEnablePcdIndex += 1
for PcdItem in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdItem]:
CName = PcdItem[0]
if GlobalData.BuildOptionPcd:
for PcdItem in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, CName) == (PcdItem[0], PcdItem[1]):
Pcd.DefaultValue = PcdItem[2]
break
EdkLogger.debug(EdkLogger.DEBUG_1, "PCD = %s.%s" % (CName, TokenSpaceGuidCName))
EdkLogger.debug(EdkLogger.DEBUG_1, "phase = %s" % Phase)
EdkLogger.debug(EdkLogger.DEBUG_1, "GeneratedTokenNumber = %s" % str(GeneratedTokenNumber))
#
# following four Dict items hold the information for LocalTokenNumberTable
#
Dict['TOKEN_INIT'][GeneratedTokenNumber] = 'Init'
if Pcd.InitString == 'UNINIT':
Dict['TOKEN_INIT'][GeneratedTokenNumber] = 'Uninit'
Dict['TOKEN_CNAME'][GeneratedTokenNumber] = CName
Dict['TOKEN_GUID'][GeneratedTokenNumber] = TokenSpaceGuid
Dict['TOKEN_TYPE'][GeneratedTokenNumber] = ' | '.join(Pcd.TokenTypeList)
if Platform.Platform.PcdInfoFlag:
TokenSpaceGuidCNameArray = StringToArray('"' + TokenSpaceGuidCName + '"' )
if TokenSpaceGuidCNameArray not in Dict['PCD_TOKENSPACE']:
Dict['PCD_TOKENSPACE'].append(TokenSpaceGuidCNameArray)
Dict['PCD_TOKENSPACE_LENGTH'].append( len(TokenSpaceGuidCNameArray.split(",")) )
Dict['PCD_TOKENSPACE_MAP'][GeneratedTokenNumber] = Dict['PCD_TOKENSPACE'].index(TokenSpaceGuidCNameArray)
CNameBinArray = StringToArray('"' + CName + '"' )
Dict['PCD_CNAME'][GeneratedTokenNumber] = CNameBinArray
Dict['PCD_CNAME_LENGTH'][GeneratedTokenNumber] = len(CNameBinArray.split(","))
Pcd.TokenTypeList = list(set(Pcd.TokenTypeList))
# search the Offset and Table, used by LocalTokenNumberTableOffset
if 'PCD_TYPE_HII' in Pcd.TokenTypeList:
# Find index by CName, TokenSpaceGuid
Offset = GetMatchedIndex(CName, Dict['VARIABLE_HEAD_CNAME_DECL'], TokenSpaceGuid, Dict['VARIABLE_HEAD_GUID_DECL'])
assert(Offset != -1)
Table = Dict['VARIABLE_DB_VALUE']
if 'PCD_TYPE_VPD' in Pcd.TokenTypeList:
Offset = GetMatchedIndex(CName, Dict['VPD_HEAD_CNAME_DECL'], TokenSpaceGuid, Dict['VPD_HEAD_GUID_DECL'])
assert(Offset != -1)
Table = Dict['VPD_DB_VALUE']
if 'PCD_TYPE_STRING' in Pcd.TokenTypeList and 'PCD_TYPE_HII' not in Pcd.TokenTypeList:
# Find index by CName, TokenSpaceGuid
Offset = GetMatchedIndex(CName, Dict['STRING_HEAD_CNAME_DECL'], TokenSpaceGuid, Dict['STRING_HEAD_GUID_DECL'])
Offset = PCD_STRING_INDEX_MAP[Offset]
assert(Offset != -1)
Table = Dict['STRING_DB_VALUE']
if 'PCD_TYPE_DATA' in Pcd.TokenTypeList:
# need to store whether it is in init table or not
Offset = GetMatchedIndex(CName, Dict[Pcd.InitString+'_CNAME_DECL_'+Pcd.DatumType], TokenSpaceGuid, Dict[Pcd.InitString+'_GUID_DECL_'+Pcd.DatumType])
assert(Offset != -1)
if Pcd.InitString == 'UNINIT':
Table = Dict[Pcd.InitString+'_GUID_DECL_'+Pcd.DatumType]
else:
Table = Dict[Pcd.InitString+'_DB_VALUE_'+Pcd.DatumType]
Dict['LOCAL_TOKEN_NUMBER_DB_VALUE'][GeneratedTokenNumber] = (Offset, Table)
#
# Update VARDEF_HEADER
#
if 'PCD_TYPE_HII' in Pcd.TokenTypeList:
Dict['VARDEF_HEADER'][GeneratedTokenNumber] = '_Variable_Header'
else:
Dict['VARDEF_HEADER'][GeneratedTokenNumber] = ''
if Pcd.Type in gDynamicExPcd:
if Phase == 'DXE':
GeneratedTokenNumber += NumberOfPeiLocalTokens
#
            # Per the PCD architecture specification, PCD Token Numbers are 1-based and 0 is defined as an invalid token number.
# For each EX type PCD, a PCD Token Number is assigned. When the
# PCD Driver/PEIM map EX_GUID and EX_TOKEN_NUMBER to the PCD Token Number,
# the non-EX Protocol/PPI interface can be called to get/set the value. This assumption is made by
# Pcd Driver/PEIM in MdeModulePkg.
# Therefore, 1 is added to GeneratedTokenNumber to generate a PCD Token Number before being inserted
# to the EXMAPPING_TABLE.
#
Dict['EXMAPPING_TABLE_EXTOKEN'].append(str(Pcd.TokenValue) + 'U')
Dict['EXMAPPING_TABLE_LOCAL_TOKEN'].append(str(GeneratedTokenNumber + 1) + 'U')
Dict['EXMAPPING_TABLE_GUID_INDEX'].append(str(GuidList.index(TokenSpaceGuid)) + 'U')
if Platform.Platform.PcdInfoFlag:
for index in range(len(Dict['PCD_TOKENSPACE_MAP'])):
TokenSpaceIndex = StringTableSize
for i in range(Dict['PCD_TOKENSPACE_MAP'][index]):
TokenSpaceIndex += Dict['PCD_TOKENSPACE_LENGTH'][i]
Dict['PCD_TOKENSPACE_OFFSET'].append(TokenSpaceIndex)
for index in range(len(Dict['PCD_TOKENSPACE'])):
StringTableSize += Dict['PCD_TOKENSPACE_LENGTH'][index]
StringTableIndex += 1
for index in range(len(Dict['PCD_CNAME'])):
Dict['PCD_CNAME_OFFSET'].append(StringTableSize)
Dict['PCD_NAME_OFFSET'].append(Dict['PCD_TOKENSPACE_OFFSET'][index])
Dict['PCD_NAME_OFFSET'].append(StringTableSize)
StringTableSize += Dict['PCD_CNAME_LENGTH'][index]
StringTableIndex += 1
if GuidList != []:
Dict['GUID_TABLE_EMPTY'] = 'FALSE'
Dict['GUID_TABLE_SIZE'] = str(len(GuidList)) + 'U'
else:
Dict['GUID_STRUCTURE'] = [GuidStringToGuidStructureString('00000000-0000-0000-0000-000000000000')]
if StringTableIndex == 0:
Dict['STRING_TABLE_INDEX'].append('')
Dict['STRING_TABLE_LENGTH'].append(1)
Dict['STRING_TABLE_CNAME'].append('')
Dict['STRING_TABLE_GUID'].append('')
Dict['STRING_TABLE_VALUE'].append('{ 0 }')
else:
Dict['STRING_TABLE_EMPTY'] = 'FALSE'
Dict['STRING_TABLE_SIZE'] = str(StringTableSize) + 'U'
if Dict['SIZE_TABLE_CNAME'] == []:
Dict['SIZE_TABLE_CNAME'].append('')
Dict['SIZE_TABLE_GUID'].append('')
Dict['SIZE_TABLE_CURRENT_LENGTH'].append(['0U'])
Dict['SIZE_TABLE_MAXIMUM_LENGTH'].append('0U')
if NumberOfLocalTokens != 0:
Dict['DATABASE_EMPTY'] = 'FALSE'
Dict['LOCAL_TOKEN_NUMBER_TABLE_SIZE'] = NumberOfLocalTokens
Dict['LOCAL_TOKEN_NUMBER'] = NumberOfLocalTokens
if NumberOfExTokens != 0:
Dict['EXMAP_TABLE_EMPTY'] = 'FALSE'
Dict['EXMAPPING_TABLE_SIZE'] = str(NumberOfExTokens) + 'U'
Dict['EX_TOKEN_NUMBER'] = str(NumberOfExTokens) + 'U'
else:
Dict['EXMAPPING_TABLE_EXTOKEN'].append('0U')
Dict['EXMAPPING_TABLE_LOCAL_TOKEN'].append('0U')
Dict['EXMAPPING_TABLE_GUID_INDEX'].append('0U')
if NumberOfSizeItems != 0:
Dict['SIZE_TABLE_SIZE'] = str(NumberOfSizeItems * 2) + 'U'
if NumberOfSkuEnabledPcd != 0:
Dict['SKU_HEAD_SIZE'] = str(NumberOfSkuEnabledPcd) + 'U'
for AvailableSkuNumber in SkuObj.SkuIdNumberSet:
if AvailableSkuNumber not in Dict['SKUID_VALUE']:
Dict['SKUID_VALUE'].append(AvailableSkuNumber)
Dict['SKUID_VALUE'][0] = len(Dict['SKUID_VALUE']) - 1
AutoGenH.Append(gPcdDatabaseAutoGenH.Replace(Dict))
if NumberOfLocalTokens == 0:
AutoGenC.Append(gEmptyPcdDatabaseAutoGenC.Replace(Dict))
else:
#
        # Update the Size Table to the right order; it should match the LocalTokenNumberTable
#
SizeCNameTempList = []
SizeGuidTempList = []
SizeCurLenTempList = []
SizeMaxLenTempList = []
ReOrderFlag = True
if len(Dict['SIZE_TABLE_CNAME']) == 1:
if not (Dict['SIZE_TABLE_CNAME'][0] and Dict['SIZE_TABLE_GUID'][0]):
ReOrderFlag = False
if ReOrderFlag:
for Count in range(len(Dict['TOKEN_CNAME'])):
for Count1 in range(len(Dict['SIZE_TABLE_CNAME'])):
if Dict['TOKEN_CNAME'][Count] == Dict['SIZE_TABLE_CNAME'][Count1] and \
Dict['TOKEN_GUID'][Count] == Dict['SIZE_TABLE_GUID'][Count1]:
SizeCNameTempList.append(Dict['SIZE_TABLE_CNAME'][Count1])
SizeGuidTempList.append(Dict['SIZE_TABLE_GUID'][Count1])
SizeCurLenTempList.append(Dict['SIZE_TABLE_CURRENT_LENGTH'][Count1])
SizeMaxLenTempList.append(Dict['SIZE_TABLE_MAXIMUM_LENGTH'][Count1])
for Count in range(len(Dict['SIZE_TABLE_CNAME'])):
Dict['SIZE_TABLE_CNAME'][Count] = SizeCNameTempList[Count]
Dict['SIZE_TABLE_GUID'][Count] = SizeGuidTempList[Count]
Dict['SIZE_TABLE_CURRENT_LENGTH'][Count] = SizeCurLenTempList[Count]
Dict['SIZE_TABLE_MAXIMUM_LENGTH'][Count] = SizeMaxLenTempList[Count]
AutoGenC.Append(gPcdDatabaseAutoGenC.Replace(Dict))
if Platform.Platform.VarCheckFlag:
dest = os.path.join(Platform.BuildDir, 'FV')
VarCheckTab.dump(dest, Phase)
Buffer = BuildExDataBase(Dict)
return AutoGenH, AutoGenC, Buffer
def GetOrderedDynamicPcdList(DynamicPcdList, PcdTokenNumberList):
ReorderedDyPcdList = [None for i in range(len(DynamicPcdList))]
for Pcd in DynamicPcdList:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in PcdTokenNumberList:
ReorderedDyPcdList[PcdTokenNumberList[Pcd.TokenCName, Pcd.TokenSpaceGuidCName]-1] = Pcd
return ReorderedDyPcdList
| intel/ipmctl | BaseTools/Source/Python/AutoGen/GenPcdDb.py | Python | bsd-3-clause | 75,819 | 0.007755 |
import requests
from config import constants
from database import db
from database import db_models
from util import fips
R = db_models.Rep
HOUSE_MEMBERS_LEAVING_OFFICE_URL = 'https://api.propublica.org/congress/v1/114/house/members/leaving.json'
SENATE_MEMBERS_LEAVING_OFFICE_URL = 'https://api.propublica.org/congress/v1/114/senate/members/leaving.json'
PP_STATUS_TO_DB_STATUS = {
'Retiring': R.Status.RETIRING,
'Seeking another office': R.Status.SEEKING_OTHER_OFFICE,
'Left Congress': R.Status.LEFT_CONGRESS,
'Defeated in primary election': R.Status.DEFEATED_IN_PRIMARY,
}
def main():
populate_senators()
populate_reps()
def populate_senators():
response = requests.get(SENATE_MEMBERS_LEAVING_OFFICE_URL, headers={'X-API-Key': constants.PROPUBLICA_API_KEY})
for db_rep in R.query.filter(R.chamber == R.Chamber.SENATE):
for member in response.json()['results'][0]['members']:
if db_rep.state_code == member['state'] and db_rep.last_name == member['last_name']:
db_rep.status = PP_STATUS_TO_DB_STATUS[member['status']]
db_rep.status_note = member['note']
break
db.session.commit()
def populate_reps():
response = requests.get(HOUSE_MEMBERS_LEAVING_OFFICE_URL, headers={'X-API-Key': constants.PROPUBLICA_API_KEY})
info_by_district_code = {}
for member in response.json()['results'][0]['members']:
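        # At-large states have a single district keyed as '00'; otherwise zero-pad the district number.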
if member['state'] in fips.ONE_DISTRICT_STATE_CODES:
district_code = '%s00' % member['state']
else:
district_code = '%s%02d' % (member['state'], int(member['district']))
info_by_district_code[district_code] = {
'status': member['status'],
'note': member['note'],
}
for db_rep in R.query.filter(R.district_code.in_(info_by_district_code.keys())):
info = info_by_district_code[db_rep.district_code]
db_rep.status = PP_STATUS_TO_DB_STATUS[info['status']]
db_rep.status_note = info['note']
db.session.commit()
if __name__ == '__main__':
from tools import db_utils
with db_utils.request_context():
main()
| jlgoldman/writetogov | database/populate_rep_status_from_propublica.py | Python | bsd-3-clause | 2,149 | 0.005119 |
import asyncio
import asyncio.streams
from .client_exceptions import (ClientOSError, ClientPayloadError,
ClientResponseError, ServerDisconnectedError)
from .http import HttpResponseParser, StreamWriter
from .streams import EMPTY_PAYLOAD, DataQueue
class ResponseHandler(DataQueue, asyncio.streams.FlowControlMixin):
"""Helper class to adapt between Protocol and StreamReader."""
def __init__(self, *, loop=None, **kwargs):
asyncio.streams.FlowControlMixin.__init__(self, loop=loop)
DataQueue.__init__(self, loop=loop)
self.paused = False
self.transport = None
self.writer = None
self._should_close = False
self._message = None
self._payload = None
self._payload_parser = None
self._reading_paused = False
self._timer = None
self._skip_status = ()
self._tail = b''
self._upgraded = False
self._parser = None
@property
def upgraded(self):
return self._upgraded
@property
def should_close(self):
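        # Close when the payload was not fully read, the protocol was upgraded, an
        # exception is pending, a custom payload parser is active, or unprocessed data remains.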
if (self._payload is not None and
not self._payload.is_eof() or self._upgraded):
return True
return (self._should_close or self._upgraded or
self.exception() is not None or
self._payload_parser is not None or
len(self) or self._tail)
def close(self):
transport = self.transport
if transport is not None:
transport.close()
self.transport = None
return transport
def is_connected(self):
return self.transport is not None
def connection_made(self, transport):
self.transport = transport
self.writer = StreamWriter(self, transport, self._loop)
def connection_lost(self, exc):
if self._payload_parser is not None:
try:
self._payload_parser.feed_eof()
except Exception:
pass
try:
self._parser.feed_eof()
except Exception as e:
if self._payload is not None:
self._payload.set_exception(
ClientPayloadError('Response payload is not completed'))
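        # The connection dropped before the message reached EOF; surface it as a client-level error.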
if not self.is_eof():
if isinstance(exc, OSError):
exc = ClientOSError(*exc.args)
if exc is None:
exc = ServerDisconnectedError()
DataQueue.set_exception(self, exc)
self.transport = self.writer = None
self._should_close = True
self._parser = None
self._message = None
self._payload = None
self._payload_parser = None
self._reading_paused = False
super().connection_lost(exc)
def eof_received(self):
pass
def pause_reading(self):
if not self._reading_paused:
try:
self.transport.pause_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = True
def resume_reading(self):
if self._reading_paused:
try:
self.transport.resume_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = False
def set_exception(self, exc):
self._should_close = True
super().set_exception(exc)
def set_parser(self, parser, payload):
self._payload = payload
self._payload_parser = parser
if self._tail:
data, self._tail = self._tail, None
self.data_received(data)
def set_response_params(self, *, timer=None,
skip_payload=False,
skip_status_codes=(),
read_until_eof=False):
self._skip_payload = skip_payload
self._skip_status_codes = skip_status_codes
self._read_until_eof = read_until_eof
self._parser = HttpResponseParser(
self, self._loop, timer=timer,
payload_exception=ClientPayloadError,
read_until_eof=read_until_eof)
if self._tail:
data, self._tail = self._tail, b''
self.data_received(data)
def data_received(self, data):
if not data:
return
# custom payload parser
if self._payload_parser is not None:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self._payload = None
self._payload_parser = None
if tail:
self.data_received(tail)
return
else:
if self._upgraded or self._parser is None:
# i.e. websocket connection, websocket parser is not set yet
self._tail += data
else:
# parse http messages
try:
messages, upgraded, tail = self._parser.feed_data(data)
except BaseException as exc:
import traceback
traceback.print_exc()
self._should_close = True
self.set_exception(
ClientResponseError(code=400, message=str(exc)))
self.transport.close()
return
self._upgraded = upgraded
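                # Feed each parsed (message, payload) pair into the data queue, substituting
                # EMPTY_PAYLOAD when payload reading is skipped or the status code is excluded.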
for message, payload in messages:
if message.should_close:
self._should_close = True
self._message = message
self._payload = payload
if (self._skip_payload or
message.code in self._skip_status_codes):
self.feed_data((message, EMPTY_PAYLOAD), 0)
else:
self.feed_data((message, payload), 0)
if upgraded:
self.data_received(tail)
else:
self._tail = tail
| alex-eri/aiohttp-1 | aiohttp/client_proto.py | Python | apache-2.0 | 6,070 | 0 |
#!/usr/bin/env python
import imp
import os
import sys
PYCART_DIR = ''.join(['python-', '.'.join(map(str, sys.version_info[:2]))])
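# e.g. 'python-2.7' -- the OpenShift Python cartridge directory for the running interpreter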
try:
zvirtenv = os.path.join(os.environ['OPENSHIFT_HOMEDIR'], PYCART_DIR,
'virtenv', 'bin', 'activate_this.py')
execfile(zvirtenv, dict(__file__ = zvirtenv) )
except IOError:
pass
def run_gevent_server(app, ip, port=8181):
from gevent.pywsgi import WSGIServer
WSGIServer((ip, port), app).serve_forever()
def run_simple_httpd_server(app, ip, port=8181):
from wsgiref.simple_server import make_server
make_server(ip, port, app).serve_forever()
#
# IMPORTANT: Put any additional includes below this line. If placed above this
# line, it's possible required libraries won't be in your searchable path
#
#
# main():
#
if __name__ == '__main__':
ip = os.environ['OPENSHIFT_PYTHON_IP']
port = 8181
zapp = imp.load_source('application', 'wsgi/application')
# Use gevent if we have it, otherwise run a simple httpd server.
print 'Starting WSGIServer on %s:%d ... ' % (ip, port)
try:
run_gevent_server(zapp.application, ip, port)
except:
print 'gevent probably not installed - using default simple server ...'
run_simple_httpd_server(zapp.application, ip, port)
| getupcloud/openshift-nginx-python-2.7 | app.py | Python | apache-2.0 | 1,287 | 0.018648 |