| signature | implementation |
| --- | --- |
def _get_sender(*sender_params, **kwargs):
    """Factory for ``_Sender`` objects, deduplicated per target server.

    Senders are cached in the module-level ``_sender_instances`` mapping,
    keyed by ``sender_params``, so the same target server never gets two
    senders. The ``notify_func`` keyword is re-attached on every call so a
    cached sender always uses the caller's latest callback.
    """
    callback = kwargs['notify_func']
    with _sender_instances_lock:
        cached = _sender_instances.get(sender_params, None)
        if cached:
            cached._notify = callback
            return cached
        fresh = _Sender(*sender_params, notify=callback)
        _sender_instances[sender_params] = fresh
        return fresh
|
def get_cell(self, index, column):
    """Return the value stored at a single (index, column) location.

    :param index: index value
    :param column: column name
    :return: the cell value
    """
    if self._sort:
        row = sorted_index(self._index, index)
    else:
        row = self._index.index(index)
    col = self._columns.index(column)
    return self._data[col][row]
|
def taper_shift(waveform, output):
    """Add ``waveform`` into ``output``, right-aligned when shorter.

    Used for tapering multi-mode ringdowns: a shorter waveform is summed
    into the tail of ``output``; equal-length data is summed in place.
    Returns ``output`` (mutated in place).
    """
    if len(waveform) == len(output):
        output.data += waveform.data
    else:
        offset = len(output) - len(waveform)
        output.data[offset:] += waveform.data
    return output
|
def get_config_directory(appname):
    """Return the OS-specific configuration directory for an application.

    :type appname: str
    :arg appname: capitalized name of the application
    """
    system = platform.system().lower()
    if system == 'windows':
        base = os.getenv('APPDATA') or '~'
        path = os.path.join(base, appname, appname)
    elif system == 'darwin':
        path = os.path.join('~', 'Library', 'Application Support', appname)
    else:
        # XDG convention on Linux/BSD; the app name is lowercased there.
        base = os.getenv('XDG_CONFIG_HOME') or '~/.config'
        path = os.path.join(base, appname.lower())
    return os.path.expanduser(path)
|
def gatk_haplotype_caller(job, bam, bai, ref, fai, ref_dict, annotations=None, emit_threshold=10.0, call_threshold=30.0, unsafe_mode=False, hc_output=None):
    """Use GATK HaplotypeCaller to identify SNPs and INDELs. Outputs variants in a Genomic VCF file.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param str bai: FileStoreID for BAM index file
    :param str ref: FileStoreID for reference genome fasta file
    :param str ref_dict: FileStoreID for reference sequence dictionary file
    :param str fai: FileStoreID for reference fasta index file
    :param list[str] annotations: List of GATK variant annotations, default is None
    :param float emit_threshold: Minimum phred-scale confidence threshold for a variant to be emitted, default is 10.0
    :param float call_threshold: Minimum phred-scale confidence threshold for a variant to be called, default is 30.0
    :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :param str hc_output: URL or local path to pre-cooked VCF file, default is None
    :return: FileStoreID for GVCF file
    :rtype: str
    """
    job.fileStore.logToMaster('Running GATK HaplotypeCaller')
    # Fixed filenames the GATK container expects inside its working dir.
    inputs = {'genome.fa': ref, 'genome.fa.fai': fai, 'genome.dict': ref_dict, 'input.bam': bam, 'input.bam.bai': bai}
    work_dir = job.fileStore.getLocalTempDir()
    # Python 2 only (dict.iteritems); copy every input out of the file store.
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    # Call GATK -- HaplotypeCaller with parameters to produce a genomic VCF file:
    # https://software.broadinstitute.org/gatk/documentation/article?id=2803
    command = ['-T', 'HaplotypeCaller', '-nct', str(job.cores), '-R', 'genome.fa', '-I', 'input.bam', '-o', 'output.g.vcf', '-stand_call_conf', str(call_threshold), '-stand_emit_conf', str(emit_threshold), '-variant_index_type', 'LINEAR', '-variant_index_parameter', '128000', '--genotyping_mode', 'Discovery', '--emitRefConfidence', 'GVCF']
    if unsafe_mode:
        # The -U flag is prepended so it precedes the tool selection args.
        command = ['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'] + command
    if annotations:
        for annotation in annotations:
            command.extend(['-A', annotation])
    # Uses docker_call mock mode to replace output with hc_output file
    outputs = {'output.g.vcf': hc_output}
    docker_call(job=job, work_dir=work_dir, env={'JAVA_OPTS': '-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)}, parameters=command, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2', inputs=inputs.keys(), outputs=outputs, mock=True if outputs['output.g.vcf'] else False)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.g.vcf'))
|
def _rm_gos_edges(rm_goids, edges_all):
    """Remove any is_a edges that touch a user-specified GO id.

    Edges are returned sorted by parent GO id (stable), matching the
    original iteration order.
    """
    ordered = sorted(edges_all, key=lambda edge: edge[1])
    return [
        (child, parent)
        for child, parent in ordered
        if child not in rm_goids and parent not in rm_goids
    ]
|
def cmap_center_point_adjust(cmap, range, center):
    """Re-center a colormap at ``center`` expressed within ``range``.

    The center is converted to a 0..1 ratio of the given range and handed
    to ``cmap_center_adjust()``. If ``center`` does not lie strictly inside
    ``range``, the colormap is returned unchanged.

    :param cmap: colormap instance
    :param range: tuple of (min, max)
    :param center: new cmap center
    """
    lo, hi = range[0], range[1]
    if not (lo < center < hi):
        return cmap
    ratio = abs(center - lo) / abs(hi - lo)
    return cmap_center_adjust(cmap, ratio)
|
def _uniform_sample(self):
    """Sample uniformly: first pick a demonstration, then a state in it.

    Returns the flattened mujoco state, plus the postprocessed model XML
    when ``self.need_xml`` is set.
    """
    # Pick a random episode, then a random flattened state within it.
    episode = random.choice(self.demo_list)
    flat_states = self.demo_file["data/{}/states".format(episode)].value
    chosen = random.choice(flat_states)
    if not self.need_xml:
        return chosen
    raw_xml = self._xml_for_episode_index(episode)
    return chosen, postprocess_model_xml(raw_xml)
|
def logit(x, a=0., b=1.):
    r"""Compute the logit of *x* on the domain :math:`x \in (a, b)`.

    .. math::
        \mathrm{logit}(x; a, b) = \log\left(\frac{x - a}{b - x}\right)

    This is the inverse of the logistic function with range :math:`(a, b)`.

    Parameters
    ----------
    x : float
        The value to evaluate.
    a : float, optional
        The minimum bound of the domain of x. Default is 0.
    b : float, optional
        The maximum bound of the domain of x. Default is 1.

    Returns
    -------
    float
        The logit of x.
    """
    upper = numpy.log(x - a)
    lower = numpy.log(b - x)
    return upper - lower
|
def _getPercentile(points, n, interpolate=False):
    """Return the n-th percentile of ``points`` (None entries discarded).

    Percentile is calculated using the method outlined in the NIST
    Engineering Statistics Handbook:
    http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm

    :param points: sequence of values; ``not_none`` (module helper,
        presumably filters out missing entries -- confirm) is applied first
    :param n: percentile to compute, 0-100
    :param interpolate: when True, linearly interpolate between the two
        straddling values instead of rounding the rank up
    :return: the percentile value, or None when no points remain
    """
    sortedPoints = sorted(not_none(points))
    if len(sortedPoints) == 0:
        return None
    # NIST rank: fractional position within a (len+1)-sized ordering.
    fractionalRank = (n / 100.0) * (len(sortedPoints) + 1)
    rank = int(fractionalRank)
    rankFraction = fractionalRank - rank
    if not interpolate:
        # Without interpolation, any fractional part rounds the rank up.
        rank += int(math.ceil(rankFraction))
    if rank == 0:
        # Rank below the first element: clamp to the minimum.
        percentile = sortedPoints[0]
    elif rank - 1 == len(sortedPoints):
        # Rank past the last element: clamp to the maximum.
        percentile = sortedPoints[-1]
    else:
        percentile = sortedPoints[rank - 1]
        # Adjust for 0-index
    if interpolate:
        if rank != len(sortedPoints):  # if a next value exists
            nextValue = sortedPoints[rank]
            percentile = percentile + rankFraction * (nextValue - percentile)
    return percentile
|
def segment_centre_of_mass(seg):
    '''Calculate and return centre of mass of a segment.

    The segment is modelled as a conical frustum with end radii r0 and r1.
    Its centroid lies on the axis at a fraction

        (r0^2 + 2*r0*r1 + 3*r1^2) / (4 * (r0^2 + r0*r1 + r1^2))

    of the way from the first endpoint towards the second (1/2 for a
    cylinder with r0 == r1, 3/4 towards the base-facing end for a cone).

    :param seg: pair of points, each carrying COLS.XYZ and COLS.R fields
    :return: centre-of-mass point as seg[0] XYZ plus the axial offset
    '''
    r0 = seg[0][COLS.R]
    r1 = seg[1][COLS.R]
    num = r0 * r0 + 2 * r0 * r1 + 3 * r1 * r1
    denom = 4 * (r0 * r0 + r0 * r1 + r1 * r1)
    # num/denom is already the dimensionless fraction of the segment
    # length. BUGFIX: the previous code divided this fraction by the
    # segment length again, which made the centroid displacement
    # independent of the segment's actual length (only correct when the
    # length happened to be 1).
    centre_of_mass_frac = num / denom
    return seg[0][COLS.XYZ] + centre_of_mass_frac * (seg[1][COLS.XYZ] - seg[0][COLS.XYZ])
|
def get_outlier_info(pronac):
    """Determine whether the project with the given pronac is an outlier
    with respect to raised funds.

    :param pronac: project identifier looked up in the captacao sheet
    :return: tuple (is_outlier, segment mean, segment std, raised funds)
    """
    captacao = data.planilha_captacao
    averages = data.segment_raised_funds_average.to_dict('index')
    segment = captacao[captacao['Pronac'] == pronac]['Segmento'].iloc[0]
    seg_mean = averages[segment]['mean']
    seg_std = averages[segment]['std']
    raised = get_project_raised_funds(pronac)
    is_outlier = gaussian_outlier.is_outlier(raised, seg_mean, seg_std)
    return (is_outlier, seg_mean, seg_std, raised)
|
def apply_transform(self, matrix):
    """Apply a homogeneous transform to the sphere primitive's center.

    Parameters
    ----------
    matrix : (4, 4) float
        Homogeneous transformation matrix.
    """
    transform = np.asanyarray(matrix, dtype=np.float64)
    if transform.shape != (4, 4):
        raise ValueError('shape must be 4,4')
    # Transform the center as a homogeneous point and drop the w component.
    homogeneous = np.append(self.primitive.center, 1.0)
    self.primitive.center = np.dot(transform, homogeneous)[:3]
|
def get_orders(self, instrument=None, count=50):
    """Fetch open orders for the account.

    See more:
    http://developer.oanda.com/rest-live/orders/#getOrdersForAnAccount

    Returns ``False`` when the request fails or an assertion trips.
    """
    endpoint = "{0}/{1}/accounts/{2}/orders".format(
        self.domain, self.API_VERSION, self.account_id
    )
    request_params = {"instrument": instrument, "count": count}
    try:
        return self._Client__call(uri=endpoint, params=request_params, method="get")
    except (RequestException, AssertionError):
        return False
|
def main(sniffer_instance=None, test_args=(), progname=sys.argv[0], args=sys.argv[1:]):
    """Runs the program. This is used when you want to run this program standalone.

    ``sniffer_instance`` A class (usually subclassed of Sniffer) that hooks into
        the scanner and handles running the test framework. Defaults to
        Sniffer instance.
    ``test_args`` This function normally extracts args from ``--test-arg ARG``
        command. A preset argument list can be passed. Defaults to an empty
        tuple.
    ``progname`` Program name. Defaults to sys.argv[0].
    ``args`` Command line arguments. Defaults to sys.argv[1:]
    """
    parser = OptionParser(version="%prog " + __version__)
    # BUGFIX: the adjacent literals previously concatenated to
    # "rerunningtests."; the trailing space restores the intended help text.
    parser.add_option('-w', '--wait', dest="wait_time", metavar="TIME",
                      default=0.5, type="float",
                      help="Wait time, in seconds, before possibly rerunning "
                           "tests. (default: %default)")
    parser.add_option('--no-clear', dest="clear_on_run", default=True,
                      action="store_false",
                      help="Disable the clearing of screen")
    parser.add_option('--debug', dest="debug", default=False,
                      action="store_true",
                      help="Enabled debugging output. (default: %default)")
    parser.add_option('-x', '--test-arg', dest="test_args", default=[],
                      action="append",
                      help="Arguments to pass to nose (use multiple times to "
                           "pass multiple arguments.)")
    (options, args) = parser.parse_args(args)
    test_args = test_args + tuple(options.test_args)
    if options.debug:
        print("Options:", options)
        print("Test Args:", test_args)
    try:
        print("Starting watch...")
        run(sniffer_instance, options.wait_time, options.clear_on_run,
            test_args, options.debug)
    except KeyboardInterrupt:
        print("Good bye.")
    except Exception:
        import traceback
        traceback.print_exc()
        return sys.exit(1)
    return sys.exit(0)
|
def available_migrations():
    '''List available migrations for udata and enabled plugins.

    Each row is a tuple with the following signature:

        (plugin, package, filename)
    '''
    rows = [
        ('udata', 'udata', fn)
        for fn in resource_listdir('udata', 'migrations')
        if fn.endswith('.js')
    ]
    plugins = entrypoints.get_enabled('udata.models', current_app)
    for plugin, module in plugins.items():
        if not resource_isdir(module.__name__, 'migrations'):
            continue
        rows.extend(
            (plugin, module.__name__, fn)
            for fn in resource_listdir(module.__name__, 'migrations')
            if fn.endswith('.js')
        )
    # Sorted by migration filename across all packages.
    return sorted(rows, key=lambda row: row[2])
|
def filter_by_status(weather_list, status, weather_code_registry):
    """Return the sublist of *Weather* objects matching a detailed status.

    The status lookup is performed against the provided
    *WeatherCodeRegistry* object.

    :param weather_list: a list of *Weather* objects
    :type weather_list: list
    :param status: a string indicating a detailed weather status
    :type status: str
    :param weather_code_registry: a *WeatherCodeRegistry* object
    :returns: list of matching *Weather* objects
    """
    return [
        weather for weather in weather_list
        if status_is(weather, status, weather_code_registry)
    ]
|
def get_status_badge(self, project, definition, branch_name=None, stage_name=None, job_name=None, configuration=None, label=None):
    """GetStatusBadge.
    [Preview API] Gets the build status for a definition, optionally scoped
    to a specific branch, stage, job, and configuration. If there is more
    than one, a stageName value is required when specifying a jobName, and
    the same rule applies for both when passing a configuration parameter.

    :param str project: Project ID or project name
    :param str definition: Either the definition name with optional leading
        folder path, or the definition id.
    :param str branch_name: Only consider the most recent build for this branch.
    :param str stage_name: Use this stage within the pipeline to render the status.
    :param str job_name: Use this job within a stage of the pipeline to render the status.
    :param str configuration: Use this job configuration to render the status
    :param str label: Replaces the default text on the left side of the badge.
    :rtype: str
    """
    route_values = {}
    for route_key, route_val in (('project', project), ('definition', definition)):
        if route_val is not None:
            route_values[route_key] = self._serialize.url(route_key, route_val, 'str')
    query_parameters = {}
    for query_key, param_name, value in (
            ('branchName', 'branch_name', branch_name),
            ('stageName', 'stage_name', stage_name),
            ('jobName', 'job_name', job_name),
            ('configuration', 'configuration', configuration),
            ('label', 'label', label)):
        if value is not None:
            query_parameters[query_key] = self._serialize.query(param_name, value, 'str')
    response = self._send(http_method='GET',
                          location_id='07acfdce-4757-4439-b422-ddd13a2fcc10',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('str', response)
|
def _urlparse(path):
    """Like urlparse except it assumes 'file://' if no scheme is specified.

    :param path: URL string or local filesystem path
    :return: a parse result; scheme-less paths are user-expanded,
        absolutised and re-parsed with a ``file://`` prefix
    """
    url = urlparse.urlparse(path)
    _validate_url(url)
    # NOTE(review): urlparse never yields a scheme of 'file://' (it would
    # be 'file'), so the second comparison appears dead -- confirm whether
    # 'file' was intended so explicit file:// URLs are also normalized.
    if not url.scheme or url.scheme == 'file://':  # Normalize path, and set scheme to "file" if missing
        path = os.path.abspath(os.path.expanduser(path))
        url = urlparse.urlparse('file://' + path)
    return url
|
def read(self, numberOfBytes):
    """Read from a port on dummy_serial.

    The response is dependent on what was written last to the port on
    dummy_serial, and what is defined in the :data:`RESPONSES` dictionary.

    Args:
        numberOfBytes (int): For compatibility with the real function.

    Returns a **string** for Python2 and **bytes** for Python3.

    If the response is shorter than numberOfBytes, it will sleep for timeout.
    If the response is longer than numberOfBytes, it will return only
    numberOfBytes bytes.

    Raises:
        IOError: when numberOfBytes is negative or the port is not open.
    """
    if VERBOSE:
        _print_out('\nDummy_serial: Reading from port (max length {!r} bytes)'.format(numberOfBytes))
    if numberOfBytes < 0:
        raise IOError('Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}'.format(numberOfBytes))
    if not self._isOpen:
        raise IOError('Dummy_serial: Trying to read, but the port is not open.')
    # Do the actual reading from the waiting data, and simulate the influence of numberOfBytes
    if self._waiting_data == DEFAULT_RESPONSE:
        # The sentinel default response is returned as-is and never consumed.
        returnstring = self._waiting_data
    elif numberOfBytes == len(self._waiting_data):
        # Exact fit: hand everything over and clear the buffer.
        returnstring = self._waiting_data
        self._waiting_data = NO_DATA_PRESENT
    elif numberOfBytes < len(self._waiting_data):
        if VERBOSE:
            _print_out('Dummy_serial: The numberOfBytes to read is smaller than the available data. ' + 'Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}'.format(self._waiting_data, len(self._waiting_data), numberOfBytes))
        # Partial read: keep the remainder buffered for the next call.
        returnstring = self._waiting_data[:numberOfBytes]
        self._waiting_data = self._waiting_data[numberOfBytes:]
    else:  # Wait for timeout, as we have asked for more data than available
        if VERBOSE:
            _print_out('Dummy_serial: The numberOfBytes to read is larger than the available data. ' + 'Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}'.format(self._waiting_data, len(self._waiting_data), numberOfBytes))
        time.sleep(self.timeout)
        returnstring = self._waiting_data
        self._waiting_data = NO_DATA_PRESENT
    # TODO Adapt the behavior to better mimic the Windows behavior
    if VERBOSE:
        _print_out('Dummy_serial read return data: {!r} (has length {})\n'.format(returnstring, len(returnstring)))
    if sys.version_info[0] > 2:  # Convert types to make it python3 compatible
        return bytes(returnstring, encoding='latin1')
    else:
        return returnstring
|
def squareform_isfc(isfcs, iscs=None):
    """Convert between square and condensed ISFC representations.

    Given a 2- or 3-dimensional array of square (redundant) ISFC matrices,
    returns the condensed off-diagonal ISFC values (the vectorized triangle,
    as in scipy.spatial.distance.squareform) together with the diagonal ISC
    values. Given condensed ISFC values plus the matching ISC diagonal, the
    square (redundant) ISFC array is rebuilt. This mimics
    scipy.spatial.distance.squareform but retains the diagonal ISC values.

    Parameters
    ----------
    isfcs : ndarray
        Either condensed or redundant ISFC values
    iscs : ndarray, optional
        Diagonal ISC values, required when input is condensed

    Returns
    -------
    isfcs : ndarray or tuple of ndarrays
        If condensed ISFCs are passed, a single redundant ISFC array is
        returned; if redundant ISFCs are passed, both a condensed
        off-diagonal ISFC array and the diagonal ISC values are returned
    """
    # Square (redundant) input: no ISC diagonal supplied and last two axes match.
    square_input = (type(iscs) is not np.ndarray
                    and isfcs.shape[-2] == isfcs.shape[-1])
    if square_input:
        if isfcs.ndim == 2:
            isfcs = isfcs[np.newaxis, ...]
        if isfcs.ndim != 3:
            raise ValueError("Square (redundant) ISFCs must be square "
                             "with multiple subjects or pairs of subjects "
                             "indexed by the first dimension")
        iscs = np.diagonal(isfcs, axis1=1, axis2=2)
        isfcs = np.vstack([squareform(mat, checks=False)[np.newaxis, :]
                           for mat in isfcs])
        if isfcs.shape[0] == iscs.shape[0] == 1:
            return isfcs[0], iscs[0]
        return isfcs, iscs
    # Condensed input: rebuild the redundant square matrices.
    if isfcs.ndim == iscs.ndim == 1:
        isfcs, iscs = isfcs[np.newaxis, :], iscs[np.newaxis, :]
    rebuilt = []
    for vec, diag in zip(isfcs, iscs):
        sq = squareform(vec, checks=False)
        np.fill_diagonal(sq, diag)
        rebuilt.append(sq[np.newaxis, ...])
    stacked = np.vstack(rebuilt)
    return stacked[0] if stacked.shape[0] == 1 else stacked
|
def get_git_refs(self):
    """:calls: `GET /repos/:owner/:repo/git/refs <http://developer.github.com/v3/git/refs>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GitRef.GitRef`
    """
    refs_url = self.url + "/git/refs"
    return github.PaginatedList.PaginatedList(
        github.GitRef.GitRef, self._requester, refs_url, None
    )
|
def close_debt_position(self, symbol, account=None):
    """Close a debt position and reclaim the collateral.

    :param str symbol: Symbol to close debt position for
    :param str account: (optional) account name; falls back to the
        blockchain config's "default_account"
    :raises ValueError: if no account can be determined or if symbol has
        no open call position
    """
    if not account:
        if "default_account" in self.blockchain.config:
            account = self.blockchain.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, full=True, blockchain_instance=self.blockchain)
    debts = self.list_debt_positions(account)
    if symbol not in debts:
        raise ValueError("No call position open for %s" % symbol)
    debt = debts[symbol]
    asset = debt["debt"]["asset"]
    collateral_asset = debt["collateral"]["asset"]
    # Negative deltas cancel the entire debt and withdraw all collateral;
    # amounts are scaled to on-chain integer precision per asset.
    op = operations.Call_order_update(**{"fee": {"amount": 0, "asset_id": "1.3.0"}, "delta_debt": {"amount": int(-float(debt["debt"]) * 10 ** asset["precision"]), "asset_id": asset["id"], }, "delta_collateral": {"amount": int(-float(debt["collateral"]) * 10 ** collateral_asset["precision"]), "asset_id": collateral_asset["id"], }, "funding_account": account["id"], "extensions": [], })
    return self.blockchain.finalizeOp(op, account["name"], "active")
|
def get(self, section, option, raw=False, vars=None):
    """Get an option value for a given section.

    If `vars' is provided, it must be a dictionary. The option is looked up
    in `vars' (if provided), `section', and in `defaults' in that order.
    All % interpolations are expanded in the return values, unless the
    optional argument `raw' is true. Values for interpolation keys are
    looked up in the same manner as the option.
    The section DEFAULT is special.

    :raises NoSectionError: for an unknown non-DEFAULT section
    :raises NoOptionError: when the option is absent from vars, the
        section and the defaults
    """
    sectiondict = {}
    try:
        sectiondict = self._sections[section]
    except KeyError:
        # DEFAULT has no entry in _sections but is still a valid section.
        if section != DEFAULTSECT:
            raise NoSectionError(section)
    # Update with the entry specific variables
    vardict = {}
    if vars:
        # Normalise caller-supplied keys the same way stored options are.
        for key, value in vars.items():
            vardict[self.optionxform(key)] = value
    # Lookup priority: vars first, then the section, then the defaults.
    d = _Chainmap(vardict, sectiondict, self._defaults)
    option = self.optionxform(option)
    try:
        value = d[option]
    except KeyError:
        raise NoOptionError(option, section)
    if raw or value is None:
        return value
    else:
        return self._interpolate(section, option, value, d)
|
def find_endurance_tier_iops_per_gb(volume):
    """Find the tier (IOPS per GB) for the given endurance volume.

    :param volume: The volume for which the tier level is desired
    :return: the IOPS per GB for the volume
    :raises ValueError: when the tier level is not recognised
    """
    tier_iops = {
        "LOW_INTENSITY_TIER": 0.25,
        "READHEAVY_TIER": 2,
        "WRITEHEAVY_TIER": 4,
        "10_IOPS_PER_GB": 10,
    }
    iops_per_gb = tier_iops.get(volume['storageTierLevel'])
    if iops_per_gb is None:
        raise ValueError("Could not find tier IOPS per GB for this volume")
    return iops_per_gb
|
async def main():
    """Load devices and scenes from a VELUX gateway, then run one scene."""
    gateway = PyVLX('pyvlx.yaml')
    # Alternative:
    # gateway = PyVLX(host="192.168.2.127", password="velux123", timeout=60)
    await gateway.load_devices()
    print(gateway.devices[1])
    print(gateway.devices['Fenster 4'])
    await gateway.load_scenes()
    print(gateway.scenes[0])
    print(gateway.scenes['Bath Closed'])
    # opening/closing windows by running scenes, yay!
    await gateway.scenes[1].run()
    await gateway.disconnect()
|
def displayValue(self, vocab, value, widget):
    """Overwrite the Script (Python) `displayValue.py` located at
    `Products.Archetypes.skins.archetypes` to handle the references of our
    Picklist Widget (Methods) gracefully.

    This method gets called by the `picklist.pt` template like this:

        display python:context.displayValue(vocab, value, widget);
    """
    # Taken from the Script (Python)
    translate = self.restrictedTraverse('@@at_utils').translate

    def as_string(item):
        # Ensure we have strings, otherwise the `getValue` method of
        # Products.Archetypes.utils will raise a TypeError.
        if isinstance(item, basestring):
            return item
        return api.get_title(item)

    if isinstance(value, (list, tuple)):
        value = map(as_string, value)
    return translate(vocab, value, widget)
|
def copy_file_clipboard(self, fnames=None):
    """Copy file(s)/folder(s) to the clipboard as local-file URLs."""
    if fnames is None:
        fnames = self.get_selected_filenames()
    if not isinstance(fnames, (tuple, list)):
        fnames = [fnames]
    try:
        mime_data = QMimeData()
        mime_data.setUrls([QUrl.fromLocalFile(fname) for fname in fnames])
        clipboard = QApplication.clipboard()
        clipboard.setMimeData(mime_data, mode=clipboard.Clipboard)
    except Exception as e:
        # Surface the failure to the user instead of silently dropping it.
        QMessageBox.critical(
            self, _('File/Folder copy error'),
            _("Cannot copy this type of file(s) or "
              "folder(s). The error was:\n\n") + to_text_string(e))
|
def load_mod(module, package):
    """Load a module named ``module`` from the given ``package``.

    The fully-qualified name is ``<package.__name__>.<module>``; a cached
    entry in ``sys.modules`` is returned when present, otherwise the module
    is imported via :func:`importlib.import_module`. Keep in mind that
    relative imports from within the module resolve against this prefix.

    This function raises an ``ImportError`` exception if module is not found.

    :param module: name of the module to load
    :param package: package object
    :returns: module object
    """
    qualified = '{}.{}'.format(package.__name__, module)
    if qualified in sys.modules:
        return sys.modules[qualified]
    return importlib.import_module(qualified, package=package.__name__)
|
def podcast_episode(self, podcast_episode_id):
    """Get information about a podcast episode.

    Parameters:
        podcast_episode_id (str): A podcast episode ID.

    Returns:
        list: Non-deleted podcast episode entries from the fetch response.
            NOTE(review): the original docstring promised a single dict,
            but the code returns the filtered list -- confirm intent.
    """
    response = self._call(mc_calls.PodcastFetchEpisode, podcast_episode_id)
    # Drop entries the service has flagged as deleted.
    podcast_episode_info = [podcast_episode for podcast_episode in response.body if not podcast_episode['deleted']]
    return podcast_episode_info
|
def SetCTypesForLibrary(libname, fn_table):
    """Load a library and set function argument/return types on it.

    Args:
        libname: Library name string
        fn_table: List of (function, [arg types], return type) tuples
    Returns:
        ctypes.CDLL with types set according to fn_table
    Raises:
        ErrorLibNotFound: Can't find specified lib
    """
    libpath = ctypes.util.find_library(libname)
    if not libpath:
        raise ErrorLibNotFound('Library %s not found' % libname)
    library = ctypes.cdll.LoadLibrary(libpath)
    # We need to define input/output parameters for all functions we use.
    for fn_name, arg_types, return_type in fn_table:
        fn = getattr(library, fn_name)
        fn.argtypes = arg_types
        fn.restype = return_type
    return library
|
def _to_bel_lines_body(graph) -> Iterable[str]:
    """Iterate the lines of a BEL graph's corresponding BEL script's body.

    Emits nested SET/UNSET pairs: one Citation block per citation, one
    SupportingText block per evidence, and per-edge annotation SET/UNSETs
    wrapped around each BEL statement. Each citation block ends with a
    comment-rule separator line.

    :param pybel.BELGraph graph: A BEL graph
    """
    qualified_edges = sort_qualified_edges(graph)
    for citation, citation_edges in group_citation_edges(qualified_edges):
        yield 'SET Citation = {{{}}}\n'.format(citation)
        for evidence, evidence_edges in group_evidence_edges(citation_edges):
            yield 'SET SupportingText = "{}"'.format(evidence)
            for u, v, _, data in evidence_edges:
                annotations_data = data.get(ANNOTATIONS)
                # Sorted for deterministic output; empty tuple when absent.
                keys = sorted(annotations_data) if annotations_data is not None else tuple()
                for key in keys:
                    yield _set_annotation_to_str(annotations_data, key)
                yield graph.edge_to_bel(u, v, data)
                if keys:
                    yield _unset_annotation_to_str(keys)
            yield 'UNSET SupportingText'
        yield 'UNSET Citation\n'
        yield '#' * 80
|
def __calculate_boltzmann_factor(self, state_key, next_action_list):
    '''Calculate boltzmann factor.

    Args:
        state_key: The key of state.
        next_action_list: The possible action in `self.t+1`.
                          If the length of this list is 0, all action
                          should be possible.

    Returns:
        [(`The key of action`, `boltzmann probability`)]
    '''
    # Temperature-like scaling term for the softmax below.
    sigmoid = self.__calculate_sigmoid()
    q_df = self.q_df[self.q_df.state_key == state_key]
    # NOTE(review): DataFrame.isin masks cell-wise across every column,
    # not only the action column -- confirm this filter is intended.
    q_df = q_df[q_df.isin(next_action_list)]
    q_df["boltzmann_factor"] = q_df["q_value"] / sigmoid
    q_df["boltzmann_factor"] = q_df["boltzmann_factor"].apply(np.exp)
    # Normalise the exponentiated factors into a probability distribution.
    q_df["boltzmann_factor"] = q_df["boltzmann_factor"] / q_df["boltzmann_factor"].sum()
    return q_df
|
def vxvyvz_to_galcencyl(vx, vy, vz, X, Y, Z, vsun=[0., 1., 0.], Xsun=1., Zsun=0., galcen=False, _extra_rot=True):
    """NAME:
       vxvyvz_to_galcencyl
    PURPOSE:
       transform velocities in XYZ coordinates (wrt Sun) to cylindrical
       Galactocentric coordinates for velocities
    INPUT:
       vx - U
       vy - V
       vz - W
       X - X in Galactocentric rectangular coordinates
       Y - Y in Galactocentric rectangular coordinates
       Z - Z in Galactocentric rectangular coordinates
       vsun - velocity of the sun in the GC frame ndarray[3]
       Xsun - cylindrical distance to the GC
       Zsun - Sun's height above the midplane
       galcen - if True, X, Y, Z are in cylindrical Galactocentric
                coordinates rather than rectangular coordinates
       _extra_rot - (True) if True, perform an extra tiny rotation to align
                    the Galactocentric coordinate frame with astropy's
                    definition
    OUTPUT:
       vRg, vTg, vZg
    HISTORY:
       2010-09-24 - Written - Bovy (NYU)
    """
    # First to rectangular Galactocentric, then to cylindrical components.
    rect = vxvyvz_to_galcenrect(vx, vy, vz, vsun=vsun, Xsun=Xsun, Zsun=Zsun,
                                _extra_rot=_extra_rot)
    cyl = rect_to_cyl_vec(rect[:, 0], rect[:, 1], rect[:, 2], X, Y, Z,
                          cyl=galcen)
    return nu.array(cyl).T
|
def type_str(self, short=False):
    """Return the type of the attribute as a string.

    :param short: whether to use the short form
    :return: the type
    :rtype: str
    """
    method = "typeToStringShort" if short else "typeToString"
    return javabridge.static_call(
        "weka/core/Attribute", method,
        "(Lweka/core/Attribute;)Ljava/lang/String;", self.jobject)
|
def parse_volumes_output(out):
    """Parse the CLI output of 'docker volume ls' into API-like dicts.

    :param out: CLI output.
    :type out: unicode | str
    :return: Parsed result.
    :rtype: list[dict]
    """
    if not out:
        return []
    # Drop the header row, then parse each remaining line.
    data_lines = islice(out.splitlines(), 1, None)
    return [_volume_info(line) for line in data_lines]
|
def get_filepaths_with_extension(extname, root_dir='.'):
    """Get relative filepaths of files in a directory, and sub-directories,
    with the given extension.

    Parameters
    ----------
    extname : `str`
        Extension name (e.g. 'txt', 'rst'). Extension comparison is
        case-insensitive.
    root_dir : `str`, optional
        Root directory. Current working directory by default.

    Returns
    -------
    filepaths : `list` of `str`
        File paths, relative to ``root_dir``, with the given extension.
    """
    # Normalize to the lowercase '.ext' form produced by os.path.splitext.
    suffix = extname.lower()
    if not suffix.startswith('.'):
        suffix = '.' + suffix
    root_dir = os.path.abspath(root_dir)
    matches = []
    for dirpath, _, filenames in os.walk(root_dir):
        for name in filenames:
            if os.path.splitext(name)[-1].lower() != suffix:
                continue
            matches.append(os.path.relpath(os.path.join(dirpath, name),
                                           start=root_dir))
    return matches
|
def _read_file(folder, filename):
    '''Reads and returns the contents of a file'''
    full_path = os.path.join(folder, filename)
    try:
        with salt.utils.files.fopen(full_path, 'rb') as fh_:
            return salt.utils.data.decode(fh_.readlines())
    except (OSError, IOError):
        # Mirror historical behaviour: an unreadable file yields ''.
        return ''
|
def migrate(gandi, resource, force, background):
    """Migrate a disk to another datacenter.

    :param gandi: API/CLI helper exposing ``disk`` and ``datacenter`` commands
    :param resource: name or id of the disk to migrate
    :param force: skip the interactive confirmation when True
    :param background: submit the migration without waiting for completion
    :return: the migration operation, or None if the migration was aborted
    """
    # check it's not attached -- migration only works on detached disks
    source_info = gandi.disk.info(resource)
    if source_info['vms_id']:
        click.echo('Cannot start the migration: disk %s is attached. ' 'Please detach the disk before starting the migration.' % resource)
        return
    disk_datacenter = source_info['datacenter_id']
    # Ask the API which datacenters can receive this disk.
    dc_choices = gandi.datacenter.list_migration_choice(disk_datacenter)
    if not dc_choices:
        click.echo('No datacenter is available for migration')
        return
    elif len(dc_choices) == 1:  # use the only one available
        datacenter_id = dc_choices[0]['id']
    else:
        # Several destinations possible: prompt the user to pick one by code.
        choice_list = [dc['dc_code'] for dc in dc_choices]
        dc_choice = click.Choice(choice_list)
        dc_chosen = click.prompt('Select a datacenter [%s]' % '|'.join(choice_list),  # noqa
                                 type=dc_choice, show_default=True)
        datacenter_id = [dc['id'] for dc in dc_choices if dc['dc_code'] == dc_chosen][0]
    if not force:
        proceed = click.confirm('Are you sure you want to migrate disk %s ?' % resource)
        if not proceed:
            return
    # Resolve datacenter codes only to build the progress message below.
    datacenters = gandi.datacenter.list()
    dc_from = [dc['dc_code'] for dc in datacenters if dc['id'] == disk_datacenter][0]
    dc_to = [dc['dc_code'] for dc in datacenters if dc['id'] == datacenter_id][0]
    migration_msg = ('* Starting the migration of disk %s from datacenter %s ' 'to %s' % (resource, dc_from, dc_to))
    gandi.echo(migration_msg)
    output_keys = ['id', 'type', 'step']
    oper = gandi.disk.migrate(resource, datacenter_id, background)
    if background:
        # Only print operation details when not waiting for completion.
        output_generic(gandi, oper, output_keys)
    return oper
|
def get_fpath(self, cachedir=None, cfgstr=None, ext=None):
    """Build the full cache file path from the cache directory, prefix,
    configuration string, and extension.

    Ignore:
        fname = _fname
        cfgstr = _cfgstr
    """
    dpath = self.get_cachedir(cachedir)
    fname = self.get_prefix()
    if cfgstr is None:
        cfgstr = self.get_cfgstr()
    if ext is None:
        ext = self.ext
    return _args2_fpath(dpath, fname, cfgstr, ext)
|
def __parse_identities(self, json):
    """Parse identities using Stackalytics format.

    The Stackalytics identities format is a JSON document under the
    "users" key. The document should follow the next schema:

        "users": [
            "launchpad_id": "0-jsmith",
            "gerrit_id": "jsmith",
            "companies": [
                "company_name": "Example",
                "end_date": null
            ],
            "user_name": "John Smith",
            "emails": ["jsmith@example.com", "jsmith@example.net"]
        ]

    :param json: JSON object to parse
    :raise InvalidFormatError: raised when the format of the JSON is
        not valid.
    """
    try:
        for user in json['users']:
            # The display name doubles as the unique-identity key (uuid).
            name = self.__encode(user['user_name'])
            uuid = name
            uid = UniqueIdentity(uuid=uuid)
            # Base identity: name only, no email or username.
            identity = Identity(name=name, email=None, username=None, source=self.source, uuid=uuid)
            uid.identities.append(identity)
            # One extra identity per registered email address.
            for email_addr in user['emails']:
                email = self.__encode(email_addr)
                identity = Identity(name=name, email=email, username=None, source=self.source, uuid=uuid)
                uid.identities.append(identity)
            # Optional per-site usernames; the identity source is suffixed
            # with the site (e.g. "<source>:gerrit", "<source>:launchpad").
            for site_id in ['gerrit_id', 'launchpad_id']:
                username = user.get(site_id, None)
                if not username:
                    continue
                username = self.__encode(username)
                source = self.source + ':' + site_id.replace('_id', '')
                identity = Identity(name=name, email=None, username=username, source=source, uuid=uuid)
                uid.identities.append(identity)
            # Company affiliations become enrollments.
            for rol in self.__parse_enrollments(user):
                uid.enrollments.append(rol)
            self._identities[uuid] = uid
    except KeyError as e:
        # A missing mandatory attribute means the document is malformed.
        msg = "invalid json format. Attribute %s not found" % e.args
        raise InvalidFormatError(cause=msg)
|
def save_json_metadata(self, package_info: Dict) -> bool:
    """Take the JSON metadata we just fetched and save to disk.

    Writes ``package_info`` to ``self.json_file`` and ensures the PyPI
    symlink at ``self.json_pypi_symlink`` points at it.

    :param package_info: parsed JSON metadata for the package
    :return: True on success, False if the JSON could not be written
    """
    try:
        with utils.rewrite(self.json_file) as jf:
            dump(package_info, jf, indent=4, sort_keys=True)
    except Exception as e:
        logger.error("Unable to write json to {}: {}".format(self.json_file, str(e)))
        return False
    # exist_ok avoids the TOCTOU race in the previous exists()/mkdir()
    # pair, which could raise FileExistsError when two mirror workers
    # created the directory concurrently.
    self.json_pypi_symlink.parent.mkdir(exist_ok=True)
    try:  # If symlink already exists throw a FileExistsError
        self.json_pypi_symlink.symlink_to(self.json_file)
    except FileExistsError:
        pass
    return True
|
def _valid_comparison ( time_a , time_b , event_a , event_b ) :
"""True if times can be compared ."""
|
if time_a == time_b : # Ties are only informative if exactly one event happened
return event_a != event_b
if event_a and event_b :
return True
if event_a and time_a < time_b :
return True
if event_b and time_b < time_a :
return True
return False
|
def get_path(self, path=''):
    """Validate incoming path, if path is empty, build it from resource
    attributes. If path is invalid - raise exception.

    :param path: path to remote file storage
    :return: valid path
    :raises Exception: if a url cannot be built from the given parts
    """
    if not path:
        host = self.resource_config.backup_location
        if ':' not in host:
            # No scheme in the location: derive it from backup_type, falling
            # back to the device file system for the default type.
            scheme = self.resource_config.backup_type
            if not scheme or scheme.lower() == self.DEFAULT_FILE_SYSTEM.lower():
                scheme = self.file_system
            # BUG FIX: re.DOTALL was previously passed as the positional
            # 'count' argument of re.sub (count=16), not as a flag, so the
            # pattern was not actually applied in DOTALL mode.
            scheme = re.sub('(:|/+).*$', '', scheme, flags=re.DOTALL)
            host = re.sub('^/+', '', host)
            host = '{}://{}'.format(scheme, host)
        path = host
    url = UrlParser.parse_url(path)
    if url[UrlParser.SCHEME].lower() in AUTHORIZATION_REQUIRED_STORAGE:
        # Fill in credentials from the resource config when not in the url.
        if UrlParser.USERNAME not in url or not url[UrlParser.USERNAME]:
            url[UrlParser.USERNAME] = self.resource_config.backup_user
        if UrlParser.PASSWORD not in url or not url[UrlParser.PASSWORD]:
            url[UrlParser.PASSWORD] = self._api.DecryptPassword(self.resource_config.backup_password).Value
    try:
        result = UrlParser.build_url(url)
    except Exception as e:
        self._logger.error('Failed to build url: {}'.format(e))
        raise Exception('ConfigurationOperations', 'Failed to build path url to remote host')
    return result
|
def create_transaction(self, outputs, fee=None, leftover=None, combine=True, message=None, unspents=None, custom_pushdata=False):  # pragma: no cover
    """Creates a signed P2PKH transaction.

    :param outputs: A sequence of outputs you wish to send in the form
                    ``(destination, amount, currency)``. The amount can
                    be either an int, float, or string as long as it is
                    a valid input to ``decimal.Decimal``. The currency
                    must be :ref:`supported <supported currencies>`.
    :type outputs: ``list`` of ``tuple``
    :param fee: The number of satoshi per byte to pay to miners. By default
                Bitcash will poll `<https://bitcoincashfees.earn.com>`_ and
                use a fee that will allow your transaction to be confirmed
                as soon as possible.
    :type fee: ``int``
    :param leftover: The destination that will receive any change from the
                     transaction. By default Bitcash will send any change to
                     the same address you sent from.
    :type leftover: ``str``
    :param combine: Whether or not Bitcash should use all available UTXOs to
                    make future transactions smaller and therefore reduce
                    fees. By default Bitcash will consolidate UTXOs.
    :type combine: ``bool``
    :param message: A message to include in the transaction. This will be
                    stored in the blockchain forever. Due to size limits,
                    each message will be stored in chunks of 220 bytes.
    :type message: ``str``
    :param unspents: The UTXOs to use as the inputs. By default Bitcash will
                     communicate with the blockchain itself.
    :type unspents: ``list`` of :class:`~bitcash.network.meta.Unspent`
    :returns: The signed transaction as hex.
    :rtype: ``str``
    """
    # Resolve defaults up front so the sanitize call reads cleanly.
    selected_unspents = unspents or self.unspents
    fee_per_byte = fee or get_fee()
    change_address = leftover or self.address
    unspents, outputs = sanitize_tx_data(
        selected_unspents, outputs, fee_per_byte, change_address,
        combine=combine, message=message,
        compressed=self.is_compressed(), custom_pushdata=custom_pushdata)
    return create_p2pkh_transaction(self, unspents, outputs, custom_pushdata=custom_pushdata)
|
def set_input_container(_container, cfg):
    """Save the input for the container in the configurations.

    :param _container: container object (or falsy when absent)
    :param cfg: configuration mapping with a "container" section
    :return: True when the container exists and was recorded, else False
    """
    # Short-circuit: a missing container is never recorded.
    if _container and _container.exists():
        cfg["container"]["input"] = str(_container)
        return True
    return False
|
def main():
    """Generate a PDF using the async method."""
    docraptor = DocRaptor()
    print("Create PDF")
    resp = docraptor.create({
        "document_content": "<h1>python-docraptor</h1><p>Async Test</p>",
        "test": True,
        "async": True,
    })
    print("Status ID: {status_id}".format(status_id=resp["status_id"]))
    status_id = resp["status_id"]
    # Poll the job until it reports completion, printing each status.
    while True:
        resp = docraptor.status(status_id)
        print(" {status}".format(status=resp["status"]))
        if resp["status"] == "completed":
            break
        time.sleep(3)
    print("Download to test_async.pdf")
    with open("test_async.pdf", "wb") as pdf_file:
        pdf_file.write(docraptor.download(resp["download_key"]).content)
    print("[DONE]")
|
def advanced_wrap(f, wrapper):
    """Wrap a decorated function while keeping the same keyword arguments.

    Builds (via ``eval`` of generated source) a lambda whose signature is
    the wrapper's positional arguments extended with ``f``'s keyword
    arguments, so introspection of the result shows ``f``'s keywords
    instead of an opaque ``**kwargs``.

    NOTE(review): relies on ``inspect.getargspec`` (deprecated, removed in
    Python 3.11) and ``eval`` of a built source string -- confirm this is
    only used on supported interpreter versions.
    """
    f_sig = list(inspect.getargspec(f))
    wrap_sig = list(inspect.getargspec(wrapper))
    # Update the keyword arguments of the wrapper
    if f_sig[3] is None or f_sig[3] == []:
        f_sig[3], f_kwargs = [], []
    else:
        # Names of f's keyword arguments: the tail of its arg list, one
        # name per default value.
        f_kwargs = f_sig[0][-len(f_sig[3]):]
    # Graft f's keyword args (and their defaults) onto the wrapper's spec.
    for key, default in zip(f_kwargs, f_sig[3]):
        wrap_sig[0].append(key)
        wrap_sig[3] = wrap_sig[3] + (default,)
    # Remove kwargs
    wrap_sig[2] = None
    # Outer lambda signature: wrapper's args extended with f's keywords.
    src = "lambda %s: " % (inspect.formatargspec(*wrap_sig)[1:-1])
    # Inner call forwards each keyword by name (key=key) into the wrapper.
    new_args = inspect.formatargspec(wrap_sig[0], wrap_sig[1], wrap_sig[2], f_kwargs, formatvalue=lambda x: '=' + x)
    src += 'wrapper%s\n' % new_args
    decorated = eval(src, locals())
    # Keep a reference to the original for later introspection.
    decorated.func = f
    return update_wrapper(decorated, f)
|
def resolved_packages(self):
    """Return a list of PackageVariant objects, or None if the resolve did
    not complete or was unsuccessful."""
    if self.status != SolverStatus.solved:
        return None
    # The final phase on the stack holds the solved variants.
    return self.phase_stack[-1]._get_solved_variants()
|
def _to_roman ( num ) :
"""Convert integer to roman numerals ."""
|
roman_numeral_map = ( ( 'M' , 1000 ) , ( 'CM' , 900 ) , ( 'D' , 500 ) , ( 'CD' , 400 ) , ( 'C' , 100 ) , ( 'XC' , 90 ) , ( 'L' , 50 ) , ( 'XL' , 40 ) , ( 'X' , 10 ) , ( 'IX' , 9 ) , ( 'V' , 5 ) , ( 'IV' , 4 ) , ( 'I' , 1 ) )
if not ( 0 < num < 5000 ) :
log ( WARN , 'Number out of range for roman (must be 1..4999)' )
return str ( num )
result = ''
for numeral , integer in roman_numeral_map :
while num >= integer :
result += numeral
num -= integer
return result
|
def _conn_string_odbc ( self , db_key , instance = None , conn_key = None , db_name = None ) :
'''Return a connection string to use with odbc'''
|
if instance :
dsn , host , username , password , database , driver = self . _get_access_info ( instance , db_key , db_name )
elif conn_key :
dsn , host , username , password , database , driver = conn_key . split ( ":" )
conn_str = ''
if dsn :
conn_str = 'DSN={};' . format ( dsn )
if driver :
conn_str += 'DRIVER={};' . format ( driver )
if host :
conn_str += 'Server={};' . format ( host )
if database :
conn_str += 'Database={};' . format ( database )
if username :
conn_str += 'UID={};' . format ( username )
self . log . debug ( "Connection string (before password) {}" . format ( conn_str ) )
if password :
conn_str += 'PWD={};' . format ( password )
return conn_str
|
def dispatch_job(jobname, exe, args, opts, batch_opts, dry_run=True):
    """Dispatch an LSF job.

    Writes a one-line bash script ``<jobname>.sh`` in the current directory
    and submits it with ``bsub``.

    Parameters
    ----------
    jobname : str
        Base name for the generated shell script.
    exe : str
        Execution string.
    args : list
        Positional arguments.
    opts : dict
        Dictionary of command-line options.
    batch_opts : dict
        bsub options; 'W' (wall time) and 'R' (resources) get defaults.
    dry_run : bool
        If True (the default), only print the bsub command without running it.
    """
    batch_opts.setdefault('W', 300)
    batch_opts.setdefault('R', 'rhel60 && scratch > 10')
    # Render the options dict as --key=value command-line flags.
    cmd_opts = ''
    for k, v in opts.items():
        if isinstance(v, list):
            # Repeat the flag once per list element.
            # NOTE(review): unlike the other branches this adds no trailing
            # space, so a following option may concatenate -- verify.
            cmd_opts += ' '.join(['--%s=%s' % (k, t) for t in v])
        elif isinstance(v, bool) and v:
            # True booleans become bare flags.
            cmd_opts += ' --%s ' % (k)
        elif isinstance(v, bool):
            # False booleans are simply omitted.
            continue
        elif v is not None:
            cmd_opts += ' --%s=\"%s\" ' % (k, v)
    bash_script = "{exe} {args} {opts}"
    scriptexe = jobname + '.sh'
    with open(os.path.join(scriptexe), 'wt') as f:
        f.write(bash_script.format(exe=exe, args=' '.join(args), opts=cmd_opts))
    # Translate batch options into a bsub option string and submit.
    batch_optstr = parse_lsf_opts(**batch_opts)
    batch_cmd = 'bsub %s ' % (batch_optstr)
    batch_cmd += ' bash %s' % scriptexe
    print(batch_cmd)
    if not dry_run:
        os.system(batch_cmd)
|
def setup_signals():
    """Set up the signal handlers."""
    # Route interrupt and quit signals to the shutit-specific handlers.
    handlers = (
        (signal.SIGINT, shutit_util.ctrl_c_signal_handler),
        (signal.SIGQUIT, shutit_util.ctrl_quit_signal_handler),
    )
    for signum, handler in handlers:
        signal.signal(signum, handler)
|
def canonicalize(ctx, statement, namespace_targets, version, api, config_fn):
    """Canonicalize statement.

    Target namespaces can be provided in the following manner:

        bel stmt canonicalize "<BELStmt>" --namespace_targets '{"HGNC": ["EG", "SP"], "CHEMBL": ["CHEBI"]}'

    the value of target_namespaces must be JSON and embedded in single quotes
    reserving double quotes for the dictionary elements.
    """
    # An explicit config file overrides the context configuration.
    if config_fn:
        config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn)
    else:
        config = ctx.config
    # Configuration - will return the first truthy result in list else the default option
    if namespace_targets:
        namespace_targets = json.loads(namespace_targets)
    namespace_targets = utils.first_true([namespace_targets, config.get("canonical")], None)
    api = utils.first_true([api, config.get("api", None)], None)
    version = utils.first_true([version, config.get("bel_version", None)], None)
    print("------------------------------")
    print("BEL version: {}".format(version))
    print("API Endpoint: {}".format(api))
    print("------------------------------")
    # Parse and canonicalize the statement against the chosen namespaces.
    bo = BEL(version=version, endpoint=api)
    bo.parse(statement).canonicalize(namespace_targets=namespace_targets)
    if bo.ast is None:
        # Parsing failed: show the original statement and diagnostics.
        print(bo.original_bel_stmt)
        print(bo.parse_visualize_error)
        print(bo.validation_messages)
    else:
        print("ORIGINAL ", bo.original_bel_stmt)
        print("CANONICAL", bo.ast)
        if bo.validation_messages:
            print(bo.validation_messages)
        else:
            print("No problems found")
    return
|
def give_repr(cls):  # pragma: no cover
    r"""Patch a class to give it a generic __repr__ method
    that works by inspecting the instance dictionary.

    Parameters
    ----------
    cls : type
        The class to add a generic __repr__ to.

    Returns
    -------
    cls : type
        The passed class is returned
    """
    def _generic_repr(self):
        # Only public attributes (no leading underscore) are shown.
        public_attrs = ((k, v) for k, v in self.__dict__.items()
                        if not k.startswith("_"))
        attribs = ', '.join("%s=%r" % pair for pair in public_attrs)
        return "{self.__class__.__name__}({attribs})".format(self=self, attribs=attribs)
    cls.__repr__ = _generic_repr
    return cls
|
def writesgf ( self , sgffilename ) :
"Write the game to an SGF file after a game"
|
size = self . size
outfile = open ( sgffilename , "w" )
if not outfile :
print "Couldn't create " + sgffilename
return
black_name = self . blackplayer . get_program_name ( )
white_name = self . whiteplayer . get_program_name ( )
black_seed = self . blackplayer . get_random_seed ( )
white_seed = self . whiteplayer . get_random_seed ( )
handicap = self . handicap
komi = self . komi
result = self . resultw
outfile . write ( "(;GM[1]FF[4]RU[Japanese]SZ[%s]HA[%s]KM[%s]RE[%s]\n" % ( size , handicap , komi , result ) )
outfile . write ( "PW[%s (random seed %s)]PB[%s (random seed %s)]\n" % ( white_name , white_seed , black_name , black_seed ) )
outfile . write ( self . sgffilestart )
if handicap > 1 :
outfile . write ( "AB" ) ;
for stone in self . handicap_stones :
outfile . write ( "[%s]" % ( coords_to_sgf ( size , stone ) ) )
outfile . write ( "PL[W]\n" )
to_play = self . first_to_play
for move in self . moves :
sgfmove = coords_to_sgf ( size , move )
outfile . write ( ";%s[%s]\n" % ( to_play , sgfmove ) )
if to_play == "B" :
to_play = "W"
else :
to_play = "B"
outfile . write ( ")\n" )
outfile . close
|
async def load(self, file_path, locale=None, key: int = 0, pos: int = 1, neg: Optional[ColRanges] = None):
    """Start the loading/watching process.

    :param file_path: path of the file to load/watch
    :param locale: optional locale passed through to ``start``
    :param key: column index of the key
    :param pos: column index of the positive value
    :param neg: column ranges for negative values; defaults to everything
                from column 2 onward
    """
    if neg is None:
        # Default: all columns from index 2 to the end.
        neg = [(2, None)]
    await self.start(file_path, locale,
                     kwargs={'key': key, 'pos': pos, 'neg': neg})
|
def dom_table(self):
    """A `Table` containing DOM attributes"""
    # Build lazily and cache the result on the instance.
    if self._dom_table is None:
        columns = defaultdict(list)
        for dom_id, (du, floor, _) in self.doms.items():
            position = self.dom_positions[dom_id]
            columns['dom_id'].append(dom_id)
            columns['du'].append(du)
            columns['floor'].append(floor)
            columns['pos_x'].append(position[0])
            columns['pos_y'].append(position[1])
            columns['pos_z'].append(position[2])
        self._dom_table = Table(columns, name='DOMs', h5loc='/dom_table')
    return self._dom_table
|
def ae(actual, predicted):
    """Computes the absolute error.

    This function computes the absolute error between two numbers,
    or for each element between a pair of lists or numpy arrays.

    Parameters
    ----------
    actual : int, float, list of numbers, numpy array
        The ground truth value
    predicted : same type as actual
        The predicted value

    Returns
    -------
    score : double or list of doubles
        The absolute error between actual and predicted
    """
    actual_arr = np.array(actual)
    predicted_arr = np.array(predicted)
    return np.abs(actual_arr - predicted_arr)
|
def sargasso_stats_table(self):
    """Take the parsed stats from the sargasso report and add them to the
    basic stats table at the top of the report"""
    # Column definitions for the general statistics table.
    headers = OrderedDict()
    headers['sargasso_percent_assigned'] = {
        'title': '% Assigned',
        'description': 'Sargasso % Assigned reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn',
    }
    headers['Assigned-Reads'] = {
        'title': '{} Assigned'.format(config.read_count_prefix),
        'description': 'Sargasso Assigned reads ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuBu',
        'modify': lambda x: float(x) * config.read_count_multiplier,
        'shared_key': 'read_count',
    }
    self.general_stats_addcols(self.sargasso_data, headers)
|
def elapse_time(start, end=None, precision=3):
    """Simple time calculation utility. Given a start time, it will provide an elapse time."""
    # Default the end point to "now" when not supplied.
    stop = time_module.time() if end is None else end
    return round(stop - start, precision)
|
def get_chacra_repo(shaman_url):
    """From a Shaman URL, get the chacra url for a repository, read the
    contents that point to the repo and return it as a string."""
    # Shaman redirects to the chacra resource; geturl() gives the final
    # location after redirects, which is then fetched directly.
    chacra_url = get_request(shaman_url).geturl()
    return get_request(chacra_url).read()
|
def _pseudo_parse_arglist(signode, arglist):
    """Parse list of comma separated arguments.

    Arguments can have optional types. Square brackets mark optional
    argument groups, which may nest; they are modelled with a stack of
    ``desc_optional`` nodes. If the brackets are unbalanced, the whole
    argument list is emitted as a single parameter instead.
    """
    paramlist = addnodes.desc_parameterlist()
    # stack[-1] is the node currently receiving parameters; deeper entries
    # are the enclosing optional groups.
    stack = [paramlist]
    try:
        for argument in arglist.split(','):
            argument = argument.strip()
            ends_open = 0
            ends_close = 0
            # Leading '[' opens a new optional group before this argument.
            while argument.startswith('['):
                stack.append(addnodes.desc_optional())
                stack[-2] += stack[-1]
                argument = argument[1:].strip()
            # Leading ']' closes group(s) before this argument.
            while argument.startswith(']'):
                stack.pop()
                argument = argument[1:].strip()
            # Trailing ']' (but not a literal '[]' type suffix) closes
            # group(s) after this argument; count them for later.
            while argument.endswith(']') and not argument.endswith('[]'):
                ends_close += 1
                argument = argument[:-1].strip()
            # Trailing '[' opens group(s) after this argument.
            while argument.endswith('['):
                ends_open += 1
                argument = argument[:-1].strip()
            if argument:
                stack[-1] += addnodes.desc_parameter(argument, argument)
            # Apply the deferred opens/closes recorded above.
            while ends_open:
                stack.append(addnodes.desc_optional())
                stack[-2] += stack[-1]
                ends_open -= 1
            while ends_close:
                stack.pop()
                ends_close -= 1
        # A balanced list must leave only the parameter list on the stack.
        if len(stack) != 1:
            raise IndexError
    except IndexError:  # If there are too few or too many elements on the stack, just give
        # up and treat the whole argument list as one argument, discarding
        # the already partially populated paramlist node.
        signode += addnodes.desc_parameterlist()
        signode[-1] += addnodes.desc_parameter(arglist, arglist)
    else:
        signode += paramlist
|
def sync_state(self):
    """Syncs the internal Pybullet robot state to the joint positions of the
    robot being controlled."""
    # sync IK robot state to the current robot joint positions
    self.sync_ik_robot(self.robot_jpos_getter())
    # Refresh the cached target poses from the updated model so they stay
    # consistent with the real robot.
    (self.ik_robot_target_pos_right,
     self.ik_robot_target_orn_right,
     self.ik_robot_target_pos_left,
     self.ik_robot_target_orn_left) = self.ik_robot_eef_joint_cartesian_pose()
|
def _get_library_os_path_from_library_dict_tree ( self , library_path , library_name ) :
"""Hand verified library os path from libraries dictionary tree ."""
|
if library_path is None or library_name is None :
return None
path_list = library_path . split ( os . sep )
target_lib_dict = self . libraries
# go down the path to the correct library
for path_element in path_list :
if path_element not in target_lib_dict : # Library cannot be found
target_lib_dict = None
break
target_lib_dict = target_lib_dict [ path_element ]
return None if target_lib_dict is None or library_name not in target_lib_dict else target_lib_dict [ library_name ]
|
def logReload(options):
    """encompasses all the logic for reloading observer.

    Starts a watchdog observer on the current directory and blocks until
    interrupted; on Ctrl-C it stops the observer and terminates this
    process with SIGTERM.
    """
    event_handler = Reload(options)
    observer = Observer()
    # Watch the whole working tree for changes.
    observer.schedule(event_handler, path='.', recursive=True)
    observer.start()
    try:
        # Idle loop; the observer does its work on background threads.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        pid = os.getpid()
        chalk.eraser()
        chalk.green('\nHendrix successfully closed.')
        # Send SIGTERM (15) to our own process to trigger a clean shutdown.
        os.kill(pid, 15)
    observer.join()
    exit('\n')
|
def get_edge_citation(self, u: BaseEntity, v: BaseEntity, key: str) -> Optional[CitationDict]:
    """Get the citation for a given edge."""
    # Citations are stored under the CITATION entry of the edge data.
    citation = self._get_edge_attr(u, v, key, CITATION)
    return citation
|
def get_payload(self):
    """Return Payload."""
    # Layout: scene count, then (number, 64-byte name) per scene, then the
    # number of scenes remaining.
    parts = [bytes([len(self.scenes)])]
    for number, name in self.scenes:
        parts.append(bytes([number]))
        parts.append(string_to_bytes(name, 64))
    parts.append(bytes([self.remaining_scenes]))
    return b''.join(parts)
|
def JMS_to_FormFlavor_lep(C, dd):
    """From JMS to semileptonic Fierz basis for Classes V.

    C should be the JMS basis and `ddll` should be of the
    form 'sbl_eni_tau', 'dbl_munu_e' etc.
    """
    # Map the two down-type quark flavour labels to generation indices.
    b = dflav[dd[0]]
    s = dflav[dd[1]]
    # Lepton index 1 below selects the muon; the '..mm' keys are the
    # dimuon coefficients, the '..sdnn' keys sum over the three neutrino
    # flavours with weight 1/3 each. Conjugation swaps the quark indices
    # for the chirality-flipped scalar/tensor entries.
    return {'CVLL_' + dd + 'mm': C["VedLL"][1, 1, s, b], 'CVRR_' + dd + 'mm': C["VedRR"][1, 1, s, b], 'CVLR_' + dd + 'mm': C["VdeLR"][s, b, 1, 1], 'CVRL_' + dd + 'mm': C["VedLR"][1, 1, s, b], 'CSLL_' + dd + 'mm': C["SedRR"][1, 1, b, s].conj(), 'CSRR_' + dd + 'mm': C["SedRR"][1, 1, s, b], 'CSLR_' + dd + 'mm': C["SedRL"][1, 1, s, b], 'CSRL_' + dd + 'mm': C["SedRL"][1, 1, b, s].conj(), 'CTLL_' + dd + 'mm': C["TedRR"][1, 1, b, s].conj(), 'CTRR_' + dd + 'mm': C["TedRR"][1, 1, s, b], 'CVLL_sdnn': 1 / 3 * C["VnudLL"][0, 0, s - 1, s] + 1 / 3 * C["VnudLL"][1, 1, s - 1, s] + 1 / 3 * C["VnudLL"][2, 2, s - 1, s], 'CVRL_sdnn': 1 / 3 * C["VnudLR"][0, 0, s - 1, s] + 1 / 3 * C["VnudLR"][1, 1, s - 1, s] + 1 / 3 * C["VnudLR"][2, 2, s - 1, s]}
|
def nla_put_nested(msg, attrtype, nested):
    """Add nested attributes to Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L772

    Takes the attributes found in the `nested` message and appends them to
    the message `msg` nested in a container of the type `attrtype`. The
    `nested` message may not have a family specific header.

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    nested -- message containing attributes to be nested (nl_msg class instance).

    Returns:
    0 on success or a negative error code.
    """
    _LOGGER.debug('msg 0x%x: attr <> %d: adding msg 0x%x as nested attribute', id(msg), attrtype, id(nested))
    # The nested message's payload becomes the container's payload.
    payload_len = nlmsg_datalen(nested.nm_nlh)
    payload = nlmsg_data(nested.nm_nlh)
    return nla_put(msg, attrtype, payload_len, payload)
|
def cdl_addmon(self, source_url, save_path='/', timeout=3600):
    '''Usage: cdl_addmon <source_url> [save_path] [timeout] - add an offline (cloud) download task and monitor the download progress
    source_url - the URL to download file from.
    save_path - path on PCS to save file to. default is to save to root directory '/'.
    timeout - timeout in seconds. default is 3600 seconds.'''
    # Resolve the remote destination, then delegate to the monitored
    # download implementation.
    remote_path = self.__get_cdl_dest(source_url, save_path)
    return self.__cdl_addmon(source_url, remote_path, timeout)
|
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim):
    """How many ways does a tensor dimension get split.

    This is used to "cheat" when building the mtf graph and peek at how a
    tensor dimension will be split. Returns 1 if the tensor dimension is not
    split.

    Args:
      layout: an input to convert_to_layout_rules
      mesh_shape: an input to convert_to_shape
      tensor_dim: a Dimension

    Returns:
      an integer
    """
    rules = convert_to_layout_rules(layout)
    shape = convert_to_shape(mesh_shape)
    mesh_axis = rules.tensor_dimension_to_mesh_axis(tensor_dim, shape)
    # A dimension not mapped to any mesh axis is not split at all.
    return 1 if mesh_axis is None else shape.dims[mesh_axis].size
|
def handle(send, msg, args):
    """Implements several XKCD comics."""
    output = textutils.gen_xkcd_sub(msg, True)
    if output is None:
        # Nothing to substitute: stay quiet.
        return
    nick = args['nick']
    if args['type'] == 'action':
        send("correction: * %s %s" % (nick, output))
    else:
        send("%s actually meant: %s" % (nick, output))
|
def main():
    """Main script function.

    Initializes the display and MPD client, then loops forever: fetch the
    current album art, show it, and block on MPD idle until the player
    state changes again.
    """
    args = get_args()
    process_args(args)
    if not args.no_display:
        disp = display.init(args.size)
    client = song.init(args.port, args.server)
    while True:
        # Refresh the cached cover art for the current song.
        song.get_art(args.cache_dir, args.size, client)
        if not args.no_display:
            display.launch(disp, args.cache_dir / "current.jpg")
        client.send_idle()
        # Blocks until MPD reports an event; a "player" event triggers a
        # new iteration and therefore new artwork.
        if client.fetch_idle(["player"]):
            print("album: Received player event from mpd. Swapping cover art.")
            continue
|
def extend_instance(instance, *bases, **kwargs):
    """Apply subclass (mixin) to a class object or its instance

    By default, the mixin is placed at the start of bases
    to ensure its called first as per MRO. If you wish to
    have it injected last, which is useful for monkeypatching,
    then you can specify 'last=True'. See here:
    http://stackoverflow.com/a/10018792/1267398

    :attr cls: Target object
    :type cls: Class instance
    :attr bases: List of new bases to subclass with
    :attr last: Inject new bases after existing bases
    :type last: bool

    >>> class A(object): pass
    >>> class B(object): pass
    >>> a = A()
    >>> b = B()
    >>> isinstance(b, A)
    False
    >>> extend_instance(b, A)
    >>> isinstance(b, A)
    True
    """
    inject_last = kwargs.get('last', False)
    extra_bases = tuple(bases)
    for candidate in extra_bases:
        assert inspect.isclass(candidate), "bases must be classes"
    assert not inspect.isclass(instance)
    original_cls = instance.__class__
    # Keep the original class name; only the base order changes.
    if inject_last:
        new_bases = (original_cls,) + extra_bases
    else:
        new_bases = extra_bases + (original_cls,)
    instance.__class__ = type(original_cls.__name__, new_bases, {})
|
def _check ( self ) :
"""Checks if the message parameters are valid .
Assumes that the types are already correct .
: raises ValueError : iff one or more attributes are invalid"""
|
if self . timestamp < 0.0 :
raise ValueError ( "the timestamp may not be negative" )
if isinf ( self . timestamp ) :
raise ValueError ( "the timestamp may not be infinite" )
if isnan ( self . timestamp ) :
raise ValueError ( "the timestamp may not be NaN" )
if self . is_remote_frame and self . is_error_frame :
raise ValueError ( "a message cannot be a remote and an error frame at the sane time" )
if self . arbitration_id < 0 :
raise ValueError ( "arbitration IDs may not be negative" )
if self . is_extended_id :
if 0x20000000 <= self . arbitration_id :
raise ValueError ( "Extended arbitration IDs must be less than 2^29" )
elif 0x800 <= self . arbitration_id :
raise ValueError ( "Normal arbitration IDs must be less than 2^11" )
if self . dlc < 0 :
raise ValueError ( "DLC may not be negative" )
if self . is_fd :
if 64 < self . dlc :
raise ValueError ( "DLC was {} but it should be <= 64 for CAN FD frames" . format ( self . dlc ) )
elif 8 < self . dlc :
raise ValueError ( "DLC was {} but it should be <= 8 for normal CAN frames" . format ( self . dlc ) )
if self . is_remote_frame :
if self . data is not None and len ( self . data ) != 0 :
raise ValueError ( "remote frames may not carry any data" )
elif self . dlc != len ( self . data ) :
raise ValueError ( "the DLC and the length of the data must match up for non remote frames" )
if not self . is_fd :
if self . bitrate_switch :
raise ValueError ( "bitrate switch is only allowed for CAN FD frames" )
if self . error_state_indicator :
raise ValueError ( "error state indicator is only allowed for CAN FD frames" )
|
def Descargar(self, url=URL, filename="padron.txt", proxy=None):
    """Download the AFIP registry file; return 200, or 304 if not modified.

    NOTE: this is Python 2 code (print statements, urllib2, old except syntax).

    :param url: source URL of the zipped registry (module-level URL default)
    :param filename: local path for the extracted text file
    :param proxy: optional proxy address, applied to both http and https
    :return: 200 on a fresh download, 304 when the server reports the file
        unchanged since the local copy's modification time
    """
    proxies = {}
    if proxy:
        proxies['http'] = proxy
        proxies['https'] = proxy
    # NOTE(review): the handler is built but never installed via
    # urllib2.build_opener/install_opener -- confirm the proxy takes effect.
    proxy_handler = urllib2.ProxyHandler(proxies)
    print "Abriendo URL %s ..." % url
    req = urllib2.Request(url)
    if os.path.exists(filename):
        # Conditional GET: only fetch if newer than the local copy's mtime.
        http_date = formatdate(timeval=os.path.getmtime(filename), localtime=False, usegmt=True)
        req.add_header('If-Modified-Since', http_date)
    try:
        web = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        if e.code == 304:
            # Server says our local copy is still current.
            print "No modificado desde", http_date
            return 304
        else:
            raise
    # leer info del request: response metadata used for progress reporting
    meta = web.info()
    lenght = float(meta['Content-Length'])  # sic: misspelled name kept as-is
    date = meta['Last-Modified']
    tmp = open(filename + ".zip", "wb")
    print "Guardando"
    size = 0
    p0 = None
    while True:
        # Print progress only when the integer percentage advances.
        p = int(size / lenght * 100)
        if p0 is None or p > p0:
            print "Leyendo ... %0d %%" % p
            p0 = p
        data = web.read(1024 * 100)  # download in 100 KiB chunks
        size = size + len(data)
        if not data:
            print "Descarga Terminada!"
            break
        tmp.write(data)
    print "Abriendo ZIP..."
    tmp.close()
    web.close()
    uf = open(filename + ".zip", "rb")
    zf = zipfile.ZipFile(uf)
    # Each archive member overwrites `filename` in turn, so if the zip has
    # more than one member the last one wins.
    for fn in zf.namelist():
        print "descomprimiendo", fn
        tf = open(filename, "wb")
        tf.write(zf.read(fn))
        tf.close()
    return 200
|
def _generate_list_heading(self):
    """Generate the list of heading links of page.

    Ensures two containers exist in <body> -- one before and one after the
    content -- each holding an <ol> that will receive heading links. Existing
    containers (matched by id) are reused; missing ones are created only when
    the corresponding label text (`elements_heading_before/after`) is set.
    """
    local = self.parser.find('body').first_result()
    id_container_heading_before = (AccessibleNavigationImplementation.ID_CONTAINER_HEADING_BEFORE)
    id_container_heading_after = (AccessibleNavigationImplementation.ID_CONTAINER_HEADING_AFTER)
    if local is not None:
        # --- container placed before the page content ---
        container_before = self.parser.find('#' + id_container_heading_before).first_result()
        if (container_before is None) and (self.elements_heading_before):
            # Create the container with its label span and prepend to <body>.
            container_before = self.parser.create_element('div')
            container_before.set_attribute('id', id_container_heading_before)
            text_container_before = self.parser.create_element('span')
            text_container_before.set_attribute('class', AccessibleNavigationImplementation.CLASS_TEXT_HEADING)
            text_container_before.append_text(self.elements_heading_before)
            container_before.append_element(text_container_before)
            local.prepend_element(container_before)
        if container_before is not None:
            # Reuse the container's <ol>, creating it if absent.
            self.list_heading_before = self.parser.find(container_before).find_children('ol').first_result()
            if self.list_heading_before is None:
                self.list_heading_before = self.parser.create_element('ol')
                container_before.append_element(self.list_heading_before)
        # --- container placed after the page content (mirror of the above) ---
        container_after = self.parser.find('#' + id_container_heading_after).first_result()
        if (container_after is None) and (self.elements_heading_after):
            container_after = self.parser.create_element('div')
            container_after.set_attribute('id', id_container_heading_after)
            text_container_after = self.parser.create_element('span')
            text_container_after.set_attribute('class', AccessibleNavigationImplementation.CLASS_TEXT_HEADING)
            text_container_after.append_text(self.elements_heading_after)
            container_after.append_element(text_container_after)
            local.append_element(container_after)
        if container_after is not None:
            self.list_heading_after = self.parser.find(container_after).find_children('ol').first_result()
            if self.list_heading_after is None:
                self.list_heading_after = self.parser.create_element('ol')
                container_after.append_element(self.list_heading_after)
    # NOTE(review): the flag appears to be set even when <body> was not
    # found -- confirm that is the intended behavior.
    self.list_heading_added = True
|
def to_dataframe(self, stimuli=None, inhibitors=None, prepend=""):
    """Converts the list of clampings to a `pandas.DataFrame`_ object instance

    Parameters
    ----------
    stimuli : Optional[list[str]]
        List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}.
    inhibitors : Optional[list[str]]
        List of inhibitors names. If given, inhibitors are renamed (suffix 'i')
        and converted to {0,1} instead of {-1,1}.
    prepend : str
        Columns are renamed using the given string at the beginning

    Returns
    -------
    `pandas.DataFrame`_
        DataFrame representation of the list of clampings

    .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
    """
    stimuli, inhibitors = stimuli or [], inhibitors or []
    cues = stimuli + inhibitors
    nc = len(cues)
    ns = len(stimuli)
    # If no cues are given, derive the variable universe from the clampings
    # themselves (each clamping iterates over (variable, sign) pairs).
    variables = cues or np.array(list(set((v for (v, s) in it.chain.from_iterable(self)))))
    # Collect rows in a plain list and build the matrix once at the end;
    # this avoids the O(n^2) cost of growing the array with repeated
    # np.append calls.
    rows = []
    for clamping in self:
        arr = clamping.to_array(variables)
        if nc > 0:
            # Recode stimuli: -1 -> 0 (absent); inhibitors: -1 -> 1 (present).
            arr[np.where(arr[:ns] == -1)[0]] = 0
            arr[ns + np.where(arr[ns:] == -1)[0]] = 1
        rows.append(arr)
    matrix = np.array(rows) if rows else np.array([])
    columns = stimuli + [i + 'i' for i in inhibitors] if nc > 0 else variables
    return pd.DataFrame(matrix, columns=[prepend + "%s" % c for c in columns])
|
def _build_object_type(var, property_path=None):
    """Builds schema definitions for object type values.

    :param var: The object type value
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    property_path = property_path or []
    schema = {"type": "object"}
    if is_builtin_type(var):
        # Plain builtin objects carry no config metadata to inspect.
        return schema
    entry = var.metadata[CONFIG_KEY]
    # Translate optional size bounds into JSON-schema keywords.
    for bound, keyword in ((entry.min, "minProperties"), (entry.max, "maxProperties")):
        if isinstance(bound, int):
            schema[keyword] = bound
    # NOTE: typing.Dict only accepts two typing arguments
    if is_typing_type(var.type) and len(var.type.__args__) == 2:
        key_type, value_type = var.type.__args__
        if is_regex_type(key_type):
            pattern = key_type.__supertype__.pattern
        elif is_string_type(key_type):
            pattern = "^(.*)$"
        else:
            raise ValueError(
                f"cannot serialize object with key of type {key_type!r}, "
                f"located in var {var.name!r}"
            )
        schema["patternProperties"] = {pattern: _build(value_type, property_path=property_path)}
    return schema
|
def rethreshold(self, new_threshold, new_threshold_type='MAD'):
    """Remove detections from the Party that are below a new threshold.

    .. Note:: threshold can only be set higher.

    .. Warning:: Works in place on Party.

    :type new_threshold: float
    :param new_threshold: New threshold level
    :type new_threshold_type: str
    :param new_threshold_type: Either 'MAD', 'absolute' or 'av_chan_corr'
    """
    for family in self.families:
        kept = []
        for detection in family.detections:
            # Translate the requested threshold into this detection's scale.
            if new_threshold_type == 'MAD':
                if detection.threshold_type != 'MAD':
                    raise MatchFilterError(
                        'Cannot recalculate MAD level, use another threshold type')
                rescaled = (detection.threshold / detection.threshold_input) * new_threshold
            elif new_threshold_type == 'absolute':
                rescaled = new_threshold
            elif new_threshold_type == 'av_chan_corr':
                rescaled = new_threshold * detection.no_chans
            else:
                raise MatchFilterError(
                    'new_threshold_type %s is not recognised' % str(new_threshold_type))
            # Keep only detections that still clear the rescaled threshold,
            # updating their recorded threshold metadata in place.
            if detection.detect_val >= rescaled:
                detection.threshold = rescaled
                detection.threshold_input = new_threshold
                detection.threshold_type = new_threshold_type
                kept.append(detection)
        family.detections = kept
    return self
|
def get_ec2_role(self, role, mount_point='aws-ec2'):
    """GET /auth/<mount_point>/role/<role>

    :param role: name of the AWS EC2 auth role to read
    :type role: str
    :param mount_point: path where the aws-ec2 auth backend is mounted
    :type mount_point: str
    :return: parsed JSON body of the Vault response
    :rtype: dict
    """
    path = '/v1/auth/{0}/role/{1}'.format(mount_point, role)
    response = self._adapter.get(path)
    return response.json()
|
def remove_subscriber(self, message):
    """Remove a subscriber based on token.

    :param message: the message
    """
    logger.debug("Remove Subcriber")
    host, port = message.destination
    # The relation key combines the remote endpoint with the message token.
    key_token = hash(str(host) + str(port) + str(message.token))
    try:
        relation = self._relations[key_token]
    except KeyError:
        logger.warning("No Subscriber")
        return
    # Mark the transaction finished before dropping the relation.
    relation.transaction.completed = True
    del self._relations[key_token]
|
def load_class_by_path(taskpath):
    """Given a taskpath, returns the main task class.

    The path's last dotted component names the class; everything before it
    names the module. A path with no dot is treated as both module and
    attribute name, matching the original regex behavior.
    """
    parts = taskpath.rsplit('.', 1)
    module = importlib.import_module(parts[0])
    return getattr(module, parts[-1])
|
def list_data(self, previous_data=False, prompt=False, console_row=False, console_row_to_cursor=False, console_row_from_cursor=False):
    """Return list of strings, where each string is fitted to window width.

    Parameters are the same as they are in the
    :meth:`.WConsoleWindow.data` method.

    :return: list of str
    """
    text = self.data(previous_data, prompt, console_row, console_row_to_cursor, console_row_from_cursor)
    return self.split(text)
|
def type(self):
    """Read-only. A member of :ref:`MsoColorType`, one of RGB, THEME, or
    AUTO, corresponding to the way this color is defined. Its value is
    |None| if no color is applied at this level, which causes the
    effective color to be inherited from the style hierarchy."""
    underlying = self._color
    # No color element at this level: effective color comes from the
    # style hierarchy.
    if underlying is None:
        return None
    # A theme reference takes precedence over any literal value.
    if underlying.themeColor is not None:
        return MSO_COLOR_TYPE.THEME
    if underlying.val == ST_HexColorAuto.AUTO:
        return MSO_COLOR_TYPE.AUTO
    return MSO_COLOR_TYPE.RGB
|
def _PrintDatabaseTable(self, tableName, rowSelect=None):
    """Prints contents of database table. An optional argument (rowSelect) can
    be given which contains a list of column names and values against which to
    search, allowing a subset of the table to be printed.

    Gets database column headings using PRAGMA call. Automatically adjusts
    each column width based on the longest element that needs to be printed.

    Parameters
    ----------
    tableName : str
        Name of table to print.
    rowSelect : list of tuples
        A list of (column name, value) pairs to search against.

    Returns
    -------
    int
        The number of table rows printed.
    """
    goodlogging.Log.Info("DB", "{0}".format(tableName))
    goodlogging.Log.IncreaseIndent()
    # Column metadata: each PRAGMA row's element [1] is the column name.
    tableInfo = self._ActionDatabase("PRAGMA table_info({0})".format(tableName))
    dbQuery = "SELECT * FROM {0}".format(tableName)
    dbQueryParams = []
    if rowSelect is not None:
        # Build a parameterised WHERE clause from the (column, value) pairs.
        dbQuery = dbQuery + " WHERE " + ' AND '.join(['{0}=?'.format(i) for i, j in rowSelect])
        dbQueryParams = [j for i, j in rowSelect]
    tableData = self._ActionDatabase(dbQuery, dbQueryParams)
    columnCount = len(tableInfo)
    columnWidths = [0] * columnCount
    columnHeadings = []
    for count, column in enumerate(tableInfo):
        columnHeadings.append(column[1])
        columnWidths[count] = len(column[1])
    # Widen each column to fit its longest cell value.
    for row in tableData:
        for count, column in enumerate(row):
            if len(str(column)) > columnWidths[count]:
                # NOTE(review): the comparison uses len(str(column)) but the
                # assignment uses len(column) -- this would raise for
                # non-string cells (e.g. ints); confirm whether len(str(...))
                # was intended here too.
                columnWidths[count] = len(column)
    # Build a row format string like "| {0[0]:8} | {0[1]:12} |".
    printStr = "|"
    for count, column in enumerate(columnWidths):
        printStr = printStr + " {{0[{0}]:{1}}} |".format(count, columnWidths[count])
    goodlogging.Log.Info("DB", printStr.format(columnHeadings))
    goodlogging.Log.Info("DB", "-" * (sum(columnWidths) + 3 * len(columnWidths) + 1))
    for row in tableData:
        # Render NULL cells as '-' for readability.
        noneReplacedRow = ['-' if i is None else i for i in row]
        goodlogging.Log.Info("DB", printStr.format(noneReplacedRow))
    goodlogging.Log.DecreaseIndent()
    goodlogging.Log.NewLine()
    return len(tableData)
|
def split_overlaps(self):
    """Finds all intervals with overlapping ranges and splits them
    along the range boundaries.

    Completes in worst-case O(n^2*log n) time (many interval
    boundaries are inside many intervals), best-case O(n*log n)
    time (small number of overlaps << n per interval).
    """
    if not self:
        return
    # Exactly two boundaries means a single contiguous span: nothing overlaps.
    if len(self.boundary_table) == 2:
        return
    bounds = sorted(self.boundary_table)
    # get bound locations
    new_ivs = set()
    # Each adjacent boundary pair delimits an atomic slice; every interval
    # covering that slice contributes one piece carrying its original data.
    for lbound, ubound in zip(bounds[:-1], bounds[1:]):
        for iv in self[lbound]:
            new_ivs.add(Interval(lbound, ubound, iv.data))
    # Rebuild the tree in place from the split pieces.
    self.__init__(new_ivs)
|
def _aggop(self, query):
    """SINGLE ROW RETURNED WITH AGGREGATES

    Builds the SQL for an aggregate-only query and returns a pair of
    (sql, post-processing callable). The callable extracts the single
    aggregate row/value from the executed result.
    """
    if isinstance(query.select, list):
        # RETURN SINGLE OBJECT WITH AGGREGATES
        for s in query.select:
            if s.aggregate not in aggregates:
                Log.error("Expecting all columns to have an aggregate: {{select}}", select=s)
        selects = FlatList()
        for s in query.select:
            # Substitute the column expression into the aggregate template,
            # aliased to the select name.
            selects.append(sql_alias(aggregates[s.aggregate].replace("{{code}}", s.value), quote_column(s.name)))
        sql = expand_template("""
        SELECT
            {{selects}}
        FROM
            {{table}}
        {{where}}
        """, {
            "selects": SQL(",\n".join(selects)),
            "table": self._subquery(query["from"])[0],
            # NOTE(review): this branch filters on query.filter while the
            # single-select branch below uses query.where -- confirm which
            # attribute is intended; they look inconsistent.
            "where": self._where2sql(query.filter)
        })
        # Post-processor: the first (only) row of aggregate values.
        return sql, lambda sql: self.db.column(sql)[0]
        # RETURNING SINGLE OBJECT WITH AGGREGATE VALUES
    else:
        # RETURN SINGLE VALUE
        s0 = query.select
        if s0.aggregate not in aggregates:
            Log.error("Expecting all columns to have an aggregate: {{select}}", select=s0)
        select = sql_alias(aggregates[s0.aggregate].replace("{{code}}", s0.value), quote_column(s0.name))
        sql = expand_template("""
        SELECT
            {{selects}}
        FROM
            {{table}}
        {{where}}
        """, {
            "selects": SQL(select),
            "table": self._subquery(query["from"])[0],
            "where": self._where2sql(query.where)
        })
        def post(sql):
            # Single aggregate: first column of the first row.
            result = self.db.column_query(sql)
            return result[0][0]
        return sql, post
|
def _LinearFoldByteStream ( self , mapped_value , ** unused_kwargs ) :
"""Folds the data type into a byte stream .
Args :
mapped _ value ( object ) : mapped value .
Returns :
bytes : byte stream .
Raises :
FoldingError : if the data type definition cannot be folded into
the byte stream ."""
|
try :
attribute_values = [ getattr ( mapped_value , attribute_name , None ) for attribute_name in self . _attribute_names ]
attribute_values = [ value for value in attribute_values if value is not None ]
return self . _operation . WriteTo ( tuple ( attribute_values ) )
except Exception as exception :
error_string = ( 'Unable to write: {0:s} to byte stream with error: {1!s}' ) . format ( self . _data_type_definition . name , exception )
raise errors . FoldingError ( error_string )
|
def get_ligand_ring_selection(self, ring):
    """MDAnalysis atom selections of aromatic rings present in the ligand molecule.

    Takes:
        * ring * - index in self.ligrings dictionary
    Output:
        * ring_selection * - MDAnalysis Atom group
    """
    # Each atom name is prefixed with a space, reproducing the original
    # "name  A B C" query string (note the double space after "name").
    atom_names = "".join(" " + str(atom) for atom in self.ligrings[ring])
    return self.topology_data.universe.ligand.select_atoms("name " + atom_names)
|
def provider(container, cache, name=None):
    """A decorator to register a provider on a container.

    For more information see :meth:`Container.add_provider`.
    """
    def _register(func):
        # Register on the container, then hand the provider back unchanged
        # so it can still be used directly.
        container.add_provider(func, cache, name)
        return func
    return _register
|
def hash_function(self):
    """Returns the hash function proper. Ensures that `self` is not bound to
    the returned closure."""
    assert hasattr(self, 'f1') and hasattr(self, 'f2')
    # Bind only the needed attributes into locals so the closure does NOT
    # capture `self`: capturing it would keep this big object -- and all the
    # auxiliary state built while generating the hash parameters -- alive
    # for as long as the returned hash function exists.
    first, second, table = self.f1, self.f2, self.g
    def czech_hash(word):
        return table[first(word)] + table[second(word)]
    return czech_hash
|
def intersection(self, *others):
    """Return the intersection of two or more sets as a new set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.intersection(b))
    ['spam']
    """
    # Delegate the set arithmetic to the base class, then wrap the result
    # back into an NGram via copy() so the n-gram index is rebuilt.
    common = super(NGram, self).intersection(*others)
    return self.copy(common)
|
def attr(aid):
    '''Action function generator to retrieve an attribute from the current link'''
    def _attr(ctx):
        # Missing attributes yield None via dict.get.
        attributes = ctx.current_link[ATTRIBUTES]
        return attributes.get(aid)
    return _attr
|
def k2a(a, x):
    """Rescale data from a K object x to array a.

    Writes x's data into the (pre-allocated) numpy array `a` in place,
    converting kdb+ temporal types to the unit of `a` where needed.
    """
    func, scale = None, 1
    # K type code; abs() folds atom (negative) and vector (positive) codes.
    t = abs(x._t)
    # timestamp (12), month (13), date (14) or datetime (15)
    if 12 <= t <= 15:
        unit = get_unit(a)
        attr, shift, func, scale = _UNIT[unit]
        a[:] = getattr(x, attr).data
        # presumably shifts between the kdb+ and numpy epochs -- confirm
        a += shift
    # timespan (16), minute (17), second (18) or time (19)
    elif 16 <= t <= 19:
        unit = get_unit(a)
        func, scale = _SCALE[unit]
        a[:] = x.timespan.data
    else:
        # Non-temporal data: plain element-wise copy.
        a[:] = list(x)
    if func is not None:
        # Apply the unit conversion (named numpy ufunc) on the raw int64 view.
        func = getattr(numpy, func)
        a[:] = func(a.view(dtype='i8'), scale)
    if a.dtype.char in 'mM':
        # Datetime/timedelta arrays: propagate K nulls as NaT.
        n = x.null
        # NOTE(review): `n.any` without parentheses is truthy for a plain
        # numpy bound method -- confirm K exposes `any` as a property here.
        if n.any:
            a[n] = None
|
def run_checks(self, b, compute, times=[], **kwargs):
    """run any sanity checks to make sure the parameters and options are legal
    for this backend. If they are not, raise an error here to avoid errors
    within the workers.

    Any physics-checks that are backend-independent should be in
    Bundle.run_checks, and don't need to be repeated here.

    This should be subclassed by all backends, otherwise will throw a
    NotImplementedError
    """
    # The mutable default `times=[]` is kept for signature compatibility;
    # it is never mutated here.
    message = "run_checks is not implemented by the {} backend".format(
        self.__class__.__name__)
    raise NotImplementedError(message)
|
def rotx(t):
    """Rotation about the x-axis.

    Returns the 3x3 rotation matrix for angle `t` (radians).
    """
    cos_t, sin_t = np.cos(t), np.sin(t)
    return np.array([
        [1, 0, 0],
        [0, cos_t, -sin_t],
        [0, sin_t, cos_t],
    ])
|
def fts_count(self, fts, inv):
    """Return the count of segments in an inventory matching a given
    feature mask.

    Args:
        fts (set): feature mask given as a set of (value, feature) tuples
        inv (set): inventory of segments (as Unicode IPA strings)

    Returns:
        int: number of segments in `inv` that match feature mask `fts`
    """
    return sum(1 for segment in inv if self.fts_match(fts, segment))
|
def smacof_p(similarities, n_uq, metric=True, n_components=2, init=None, n_init=8, n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None, return_n_iter=False):
    """Computes multidimensional scaling using SMACOF (Scaling by Majorizing a
    Complicated Function) algorithm.

    The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes
    a objective function, the *stress*, using a majorization technique. The
    Stress Majorization, also known as the Guttman Transform, guarantees a
    monotone convergence of Stress, and is more powerful than traditional
    techniques such as gradient descent.

    The SMACOF algorithm for metric MDS can summarized by the following steps:

    1. Set an initial start configuration, randomly or not.
    2. Compute the stress
    3. Compute the Guttman Transform
    4. Iterate 2 and 3 until convergence.

    The nonmetric algorithm adds a monotonic regression steps before computing
    the stress.

    Parameters
    ----------
    similarities : symmetric ndarray, shape (n_samples, n_samples)
        similarities between the points
    metric : boolean, optional, default: True
        compute metric or nonmetric SMACOF algorithm
    n_components : int, optional, default: 2
        number of dimension in which to immerse the similarities
        overridden if initial array is provided.
    init : {None or ndarray of shape (n_samples, n_components)}, optional
        if None, randomly chooses the initial configuration
        if ndarray, initialize the SMACOF algorithm with this array
    n_init : int, optional, default: 8
        Number of time the smacof_p algorithm will be run with different
        initialisation. The final results will be the best output of the
        n_init consecutive runs in terms of stress.
    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. If -1 all CPUs are
        used. If 1 is given, no parallel computing code is used at all.
        For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run
    verbose : int, optional, default: 0
        level of verbosity
    eps : float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare converge
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed.
    return_n_iter : bool
        Whether or not to return the number of iterations.

    Returns
    -------
    X : ndarray (n_samples, n_components)
        Coordinates of the n_samples points in a n_components-space
    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points)
    n_iter : int
        The number of iterations corresponding to the best stress.
        Returned only if `return_n_iter` is set to True.

    Notes
    -----
    "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
    Groenen P. Springer Series in Statistics (1997)

    "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
    Psychometrika, 29 (1964)

    "Multidimensional scaling by optimizing goodness of fit to a nonmetric
    hypothesis" Kruskal, J. Psychometrika, 29, (1964)
    """
    similarities = check_array(similarities)
    random_state = check_random_state(random_state)
    if hasattr(init, '__array__'):
        # An explicit initial configuration makes repeated inits pointless.
        init = np.asarray(init).copy()
        if not n_init == 1:
            warnings.warn('Explicit initial positions passed: '
                          'performing only one init of the MDS instead of %d' % n_init)
            n_init = 1
    best_pos, best_stress = None, None
    if n_jobs == 1:
        # Sequential path: run each init and keep the lowest-stress result.
        for it in range(n_init):
            pos, stress, n_iter_ = _smacof_single_p(similarities, n_uq, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=random_state)
            if best_stress is None or stress < best_stress:
                best_stress = stress
                best_pos = pos.copy()
                # NOTE(review): best_iter is only bound inside this branch;
                # if n_init were 0 the returns below would raise -- confirm
                # n_init >= 1 is guaranteed by callers.
                best_iter = n_iter_
    else:
        # Parallel path: one independent seed per init, run via joblib.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(delayed(_smacof_single_p)(similarities, n_uq, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=seed) for seed in seeds)
        positions, stress, n_iters = zip(*results)
        best = np.argmin(stress)
        best_stress = stress[best]
        best_pos = positions[best]
        best_iter = n_iters[best]
    if return_n_iter:
        return best_pos, best_stress, best_iter
    else:
        return best_pos, best_stress
|
# NOTE(review): the following trailing text ("Subsets and Splits", "No
# community queries yet", "The top public SQL queries from the community
# will appear here once available.") is dataset-viewer boilerplate that was
# accidentally appended to this file; it is not Python code and should be
# deleted.