signature | implementation
---|---|
def _parse ( self , init_info ) :
"""Initialize a FCP device object from several lines of string
describing properties of the FCP device .
Here is a sample :
opnstk1 : FCP device number : B83D
opnstk1 : Status : Free
opnstk1 : NPIV world wide port number : NONE
opnstk1 : Channel path ID : 59
opnstk1 : Physical world wide port number : 20076D8500005181
The format comes from the response of xCAT , do not support
arbitrary format ."""
|
if isinstance ( init_info , list ) and ( len ( init_info ) == 5 ) :
self . _dev_no = self . _get_dev_number_from_line ( init_info [ 0 ] )
self . _npiv_port = self . _get_wwpn_from_line ( init_info [ 2 ] )
self . _chpid = self . _get_chpid_from_line ( init_info [ 3 ] )
self . _physical_port = self . _get_wwpn_from_line ( init_info [ 4 ] )
|
def get_content_macro_by_macro_id ( self , content_id , version , macro_id , callback = None ) :
"""Returns the body of a macro ( in storage format ) with the given id .
This resource is primarily used by connect applications that require the body of macro to perform their work .
When content is created , if no macroId is specified , then Confluence will generate a random id .
The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs .
To preserve backwards compatibility this resource will also match on the hash of the macro body , even if a
macroId is found . This check will become redundant as pages get macroId ' s generated for them and transparently
propagate out to all instances .
: param content _ id ( string ) : A string containing the id of the content .
: param version ( int ) : The version of the content to search .
: param macro _ id ( string ) : The macroID to find the corresponding macro .
: param callback : OPTIONAL : The callback to execute on the resulting data , before the method returns .
Default : None ( no callback , raw data returned ) .
: return : The JSON data returned from the endpoint , or the results of the callback .
Will raise requests . HTTPError on bad input , potentially ."""
|
return self . _service_get_request ( "rest/api/content/{id}/history/{version}/macro/id/{macro_id}" "" . format ( id = content_id , version = int ( version ) , macro_id = macro_id ) , callback = callback )
|
def combination_step ( self ) :
"""Update auxiliary state by a smart combination of previous
updates in the frequency domain ( standard FISTA
: cite : ` beck - 2009 - fast ` ) ."""
|
# Update t step
tprv = self . t
self . t = 0.5 * float ( 1. + np . sqrt ( 1. + 4. * tprv ** 2 ) )
# Update Y
if not self . opt [ 'FastSolve' ] :
self . Yfprv = self . Yf . copy ( )
self . Yf = self . Xf + ( ( tprv - 1. ) / self . t ) * ( self . Xf - self . Xfprv )
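A minimal standalone sketch of the same FISTA momentum update, assuming plain NumPy arrays in place of the class's frequency-domain state (the names Xf, Xfprv and t_prev below are hypothetical local stand-ins):

import numpy as np

def fista_combination(Xf, Xfprv, t_prev):
    # Standard FISTA step-size sequence: t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2
    t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t_prev ** 2))
    # Extrapolated auxiliary point: Y = X + ((t_prev - 1) / t) * (X - X_prev)
    Yf = Xf + ((t_prev - 1.0) / t) * (Xf - Xfprv)
    return Yf, t

# Example: two successive iterates and the initial t = 1
Yf, t = fista_combination(np.array([1.0, 2.0]), np.array([0.5, 1.5]), t_prev=1.0)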
|
def competitions_submissions_upload ( self , file , guid , content_length , last_modified_date_utc , ** kwargs ) : # noqa : E501
"""Upload competition submission file # noqa : E501
This method makes a synchronous HTTP request by default . To make an
asynchronous HTTP request , please pass async _ req = True
> > > thread = api . competitions _ submissions _ upload ( file , guid , content _ length , last _ modified _ date _ utc , async _ req = True )
> > > result = thread . get ( )
: param async _ req bool
: param file file : Competition submission file ( required )
: param str guid : Location where submission should be uploaded ( required )
: param int content _ length : Content length of file in bytes ( required )
: param int last _ modified _ date _ utc : Last modified date of file in milliseconds since epoch in UTC ( required )
: return : Result
If the method is called asynchronously ,
returns the request thread ."""
|
kwargs [ '_return_http_data_only' ] = True
if kwargs . get ( 'async_req' ) :
return self . competitions_submissions_upload_with_http_info ( file , guid , content_length , last_modified_date_utc , ** kwargs )
# noqa : E501
else :
( data ) = self . competitions_submissions_upload_with_http_info ( file , guid , content_length , last_modified_date_utc , ** kwargs )
# noqa : E501
return data
|
def scaffold ( args ) :
"""% prog scaffold ctgfasta reads1 . fasta mapping1 . bed
reads2 . fasta mapping2 . bed . . .
Run BAMBUS on set of contigs , reads and read mappings ."""
|
from jcvi . formats . base import FileMerger
from jcvi . formats . bed import mates
from jcvi . formats . contig import frombed
from jcvi . formats . fasta import join
from jcvi . utils . iter import grouper
p = OptionParser ( scaffold . __doc__ )
p . set_rclip ( rclip = 1 )
p . add_option ( "--conf" , help = "BAMBUS configuration file [default: %default]" )
p . add_option ( "--prefix" , default = False , action = "store_true" , help = "Only keep links between IDs with same prefix [default: %default]" )
opts , args = p . parse_args ( args )
nargs = len ( args )
if nargs < 3 or nargs % 2 != 1 :
sys . exit ( not p . print_help ( ) )
rclip = opts . rclip
ctgfasta = args [ 0 ]
duos = list ( grouper ( args [ 1 : ] , 2 ) )
trios = [ ]
for fastafile , bedfile in duos :
prefix = bedfile . rsplit ( "." , 1 ) [ 0 ]
matefile = prefix + ".mates"
matebedfile = matefile + ".bed"
if need_update ( bedfile , [ matefile , matebedfile ] ) :
matesopt = [ bedfile , "--lib" , "--nointra" , "--rclip={0}" . format ( rclip ) , "--cutoff={0}" . format ( opts . cutoff ) ]
if opts . prefix :
matesopt += [ "--prefix" ]
matefile , matebedfile = mates ( matesopt )
trios . append ( ( fastafile , matebedfile , matefile ) )
# Merge the readfasta , bedfile and matefile
bbfasta , bbbed , bbmate = "bambus.reads.fasta" , "bambus.bed" , "bambus.mates"
for files , outfile in zip ( zip ( * trios ) , ( bbfasta , bbbed , bbmate ) ) :
FileMerger ( files , outfile = outfile ) . merge ( checkexists = True )
ctgfile = "bambus.contig"
idsfile = "bambus.ids"
frombedInputs = [ bbbed , ctgfasta , bbfasta ]
if need_update ( frombedInputs , ctgfile ) :
frombed ( frombedInputs )
inputfasta = "bambus.contigs.fasta"
singletonfasta = "bambus.singletons.fasta"
cmd = "faSomeRecords {0} {1} " . format ( ctgfasta , idsfile )
sh ( cmd + inputfasta )
sh ( cmd + singletonfasta + " -exclude" )
# Run bambus
prefix = "bambus"
cmd = "goBambus -c {0} -m {1} -o {2}" . format ( ctgfile , bbmate , prefix )
if opts . conf :
cmd += " -C {0}" . format ( opts . conf )
sh ( cmd )
cmd = "untangle -e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml" . format ( prefix )
sh ( cmd )
final = "final"
cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " "-merge -detail -oo -sum -o {1}" . format ( prefix , final )
sh ( cmd )
oofile = final + ".oo"
join ( [ inputfasta , "--oo={0}" . format ( oofile ) ] )
|
def addWriter ( self , selectable ) :
"""Add a FileDescriptor for notification of data available to write ."""
|
try :
self . _writes [ selectable ] . resume ( )
except KeyError :
self . _writes [ selectable ] = g = Stream . spawn ( self , selectable , 'doWrite' )
self . addToGreenletPool ( g )
|
def cronitor ( self ) :
"""Wrap run with requests to cronitor ."""
|
url = f'https://cronitor.link/{self.opts.cronitor}/{{}}'
try :
run_url = url . format ( 'run' )
self . logger . debug ( f'Pinging {run_url}' )
requests . get ( run_url , timeout = self . opts . timeout )
except requests . exceptions . RequestException as e :
self . logger . exception ( e )
# Cronitor may be having an outage , but we still want to run our stuff
output , exit_status = self . run ( )
endpoint = 'complete' if exit_status == 0 else 'fail'
try :
ping_url = url . format ( endpoint )
self . logger . debug ( 'Pinging {}' . format ( ping_url ) )
requests . get ( ping_url , timeout = self . opts . timeout )
except requests . exceptions . RequestException as e :
self . logger . exception ( e )
return output , exit_status
|
def get_utt_regions ( self ) :
"""Return the regions of all utterances , assuming all utterances are concatenated .
A region is defined by offset , length ( num - frames ) and
a list of references to the utterance datasets in the containers .
Returns :
list : List of with a tuple for every utterances containing the region info ."""
|
regions = [ ]
current_offset = 0
for utt_idx , utt_data in zip ( self . data . info . utt_ids , self . data . utt_data ) :
offset = current_offset
num_frames = [ ]
refs = [ ]
for part in utt_data :
num_frames . append ( part . shape [ 0 ] )
refs . append ( part )
if len ( set ( num_frames ) ) != 1 :
raise ValueError ( 'Utterance {} has not the same number of frames in all containers!' . format ( utt_idx ) )
num_chunks = math . ceil ( num_frames [ 0 ] / float ( self . frames_per_chunk ) )
region = ( offset , num_chunks , refs )
regions . append ( region )
# Sets the offset for the next utterances
current_offset += num_chunks
return regions
|
def _skip_remove ( self ) :
"""Skip packages from remove"""
|
if "--checklist" not in self . extra :
self . msg . template ( 78 )
print ( "| Insert packages to exception remove:" )
self . msg . template ( 78 )
try :
self . skip = raw_input ( " > " ) . split ( )
except EOFError :
print ( "" )
raise SystemExit ( )
for s in self . skip :
if s in self . removed :
self . removed . remove ( s )
|
def _parse ( cls , data , key = None ) :
"""Parse a set of data to extract entity - only data .
Use classmethod ` parse ` if available , otherwise use the ` endpoint `
class variable to extract data from a data blob ."""
|
parse = cls . parse if cls . parse is not None else cls . get_endpoint ( )
if callable ( parse ) :
data = parse ( data )
elif isinstance ( parse , str ) :
data = data [ key ]
else :
raise Exception ( '"parse" should be a callable or string got, {0}' . format ( parse ) )
return data
|
def calc_nu_b ( b ) :
"""Calculate the cyclotron frequency in Hz given a magnetic field strength in Gauss .
This is in cycles per second not radians per second ; i . e . there is a 2π in
the denominator : ν _ B = e B / ( 2π m _ e c )"""
|
return cgs . e * b / ( 2 * cgs . pi * cgs . me * cgs . c )
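As a quick sanity check, here is a standalone sketch using approximate Gaussian-unit constants in place of the `cgs` module (which is not shown above); for B = 1 Gauss the electron cyclotron frequency comes out near 2.8 MHz.

import math

# Approximate Gaussian-unit constants; the original code takes these from `cgs`.
e = 4.8032e-10    # electron charge, esu
me = 9.1094e-28   # electron mass, g
c = 2.9979e10     # speed of light, cm/s

def calc_nu_b(b):
    # nu_B = e B / (2 pi m_e c), in Hz for B in Gauss
    return e * b / (2 * math.pi * me * c)

print(calc_nu_b(1.0))  # roughly 2.8e6 Hz per Gauss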
|
def _disappeared ( self , fd , path , ** params ) :
"""Called when an open path is no longer acessible . This will either
move the path to pending ( if the ' missing ' param is set for the
file ) , or fire an exception ."""
|
log = self . _getparam ( 'log' , self . _discard , ** params )
log . debug ( "Path %r removed or renamed, handling removal" , path )
self . _close ( fd )
if self . _mode == WF_POLLING and fd in self . _poll_stat :
del self . _poll_stat [ fd ]
if self . _mode == WF_INOTIFYX and path in self . _inx_inode :
del self . _inx_inode [ path ]
del self . fds_open [ fd ]
del self . paths_open [ path ]
if self . paths [ path ] :
try :
if self . _add_file ( path , ** params ) :
log . debug ( "Path %r immediately reappeared, pending transition skipped" , path )
return
except Exception as e :
log . debug ( "Path %r reappearance check failed -- %s" , path , e )
log . debug ( "Path %r marked as pending" , path )
self . paths_pending [ path ] = True
else :
del self . paths [ path ]
raise Exception ( "Path %r has been removed or renamed" % path )
|
def until ( self , func , message = '' , * args , ** kwargs ) :
"""Continues to execute the function until successful or time runs out .
: param func : function to execute
: param message : message to print if time ran out
: param args : arguments
: param kwargs : key word arguments
: return : result of function or None"""
|
value = None
end_time = time . time ( ) + self . _timeout
while True :
value = func ( * args , ** kwargs )
if self . _debug :
print ( "Value from func within ActionWait: {}" . format ( value ) )
if value :
break
time . sleep ( self . _poll )
self . _poll *= 1.25
if time . time ( ) > end_time :
raise RuntimeError ( message )
return value
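A self-contained sketch of the same polling-with-backoff pattern; the ActionWait constructor is not shown above, so the class below is hypothetical and only mirrors the attributes the method relies on:

import random
import time

class ActionWait:
    def __init__(self, timeout=5.0, poll=0.1):
        self._timeout = timeout
        self._poll = poll

    def until(self, func, message='', *args, **kwargs):
        end_time = time.time() + self._timeout
        while True:
            value = func(*args, **kwargs)
            if value:
                return value
            time.sleep(self._poll)
            self._poll *= 1.25          # back off between polls
            if time.time() > end_time:
                raise RuntimeError(message)

def flaky_check():
    return random.random() > 0.66       # succeeds about one time in three

print(ActionWait(timeout=5.0).until(flaky_check, message="never succeeded"))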
|
def _ReadOperatingSystemArtifactValues ( self , operating_system_values ) :
"""Reads an operating system artifact from a dictionary .
Args :
operating _ system _ values ( dict [ str , object ] ) : operating system values .
Returns :
OperatingSystemArtifact : an operating system artifact attribute container .
Raises :
MalformedPresetError : if the format of the operating system values are
not set or incorrect ."""
|
if not operating_system_values :
raise errors . MalformedPresetError ( 'Missing operating system values.' )
family = operating_system_values . get ( 'family' , None )
product = operating_system_values . get ( 'product' , None )
version = operating_system_values . get ( 'version' , None )
if not family and not product :
raise errors . MalformedPresetError ( 'Invalid operating system missing family and product.' )
return artifacts . OperatingSystemArtifact ( family = family , product = product , version = version )
|
def determine_end_point ( http_request , url ) :
"""returns detail , list or aggregates"""
|
if url . endswith ( 'aggregates' ) or url . endswith ( 'aggregates/' ) :
return 'aggregates'
else :
return 'detail' if is_detail_url ( http_request , url ) else 'list'
|
def splitFile ( inputFileName , linePerFile , outPrefix ) :
"""Split a file .
: param inputFileName : the name of the input file .
: param linePerFile : the number of line per file ( after splitting ) .
: param outPrefix : the prefix of the output files .
: type inputFileName : str
: type linePerFile : int
: type outPrefix : str
: returns : the number of created temporary files .
Splits a file ( ` ` inputFileName ` ` into multiple files containing at most
` ` linePerFile ` ` lines ."""
|
nbTmpFile = 1
nbLine = 0
tmpFile = None
try :
with open ( inputFileName , "r" ) as inputFile :
for line in inputFile :
row = line . rstrip ( "\r\n" ) . split ( " " )
nbLine += 1
if tmpFile is None :
try :
tmpFile = open ( outPrefix + "_tmp.list%d" % nbTmpFile , "w" , )
except IOError :
msg = "tmp.list%d: can't write file" % nbTmpFile
raise ProgramError ( msg )
print >> tmpFile , " " . join ( row [ : 2 ] )
if nbLine == linePerFile :
nbLine = 0
nbTmpFile += 1
tmpFile . close ( )
try :
tmpFile = open ( outPrefix + "_tmp.list%d" % nbTmpFile , "w" , )
except IOError :
msg = "tmp.list%d: can't write file" % nbTmpFile
raise ProgramError ( msg )
tmpFile . close ( )
# Check if the number of line is zero ( hence the last file is empty )
if nbLine == 0 : # We delete the last file
file_name = outPrefix + "_tmp.list{}" . format ( nbTmpFile )
if os . path . isfile ( file_name ) :
os . remove ( file_name )
nbTmpFile -= 1
except IOError :
msg = "%s: no such file" % inputFileName
raise ProgramError ( msg )
return nbTmpFile
|
def make_ac_name_map ( assy_name , primary_only = False ) :
"""make map from accession ( str ) to sequence name ( str ) for given assembly name
> > > grch38p5 _ ac _ name _ map = make _ ac _ name _ map ( ' GRCh38 . p5 ' )
> > > grch38p5 _ ac _ name _ map [ ' NC _ 000001.11 ' ]"""
|
return { s [ 'refseq_ac' ] : s [ 'name' ] for s in get_assembly ( assy_name ) [ 'sequences' ] if ( not primary_only or _is_primary ( s ) ) }
|
def verify_signature_block ( certificate_file , content , signature ) :
"""Verifies the ' signature ' over the ' content ' , trusting the
' certificate ' .
: param certificate _ file : the trusted certificate ( PEM format )
: type certificate _ file : str
: param content : The signature should match this content
: type content : str
: param signature : data ( DER format ) subject to check
: type signature : str
: return None if the signature validates .
: exception SignatureBlockVerificationError"""
|
sig_bio = BIO . MemoryBuffer ( signature )
pkcs7 = SMIME . PKCS7 ( m2 . pkcs7_read_bio_der ( sig_bio . _ptr ( ) ) , 1 )
signers_cert_stack = pkcs7 . get0_signers ( X509 . X509_Stack ( ) )
trusted_cert_store = X509 . X509_Store ( )
trusted_cert_store . set_verify_cb ( ignore_missing_email_protection_eku_cb )
trusted_cert_store . load_info ( certificate_file )
smime = SMIME . SMIME ( )
smime . set_x509_stack ( signers_cert_stack )
smime . set_x509_store ( trusted_cert_store )
data_bio = BIO . MemoryBuffer ( content )
try :
smime . verify ( pkcs7 , data_bio )
except SMIME . PKCS7_Error as message :
raise SignatureBlockVerificationError ( message )
else :
return None
|
def optimize ( update_working_block = True , block = None , skip_sanity_check = False ) :
"""Return an optimized version of a synthesized hardware block .
: param Boolean update _ working _ block : Don ' t copy the block and optimize the
new block
: param Block block : the block to optimize ( defaults to working block )
Note :
optimize works on all hardware designs , both synthesized and non synthesized"""
|
block = working_block ( block )
if not update_working_block :
block = copy_block ( block )
with set_working_block ( block , no_sanity_check = True ) :
if ( not skip_sanity_check ) or debug_mode :
block . sanity_check ( )
_remove_wire_nets ( block )
constant_propagation ( block , True )
_remove_unlistened_nets ( block )
common_subexp_elimination ( block )
if ( not skip_sanity_check ) or debug_mode :
block . sanity_check ( )
return block
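A hedged usage sketch, assuming this is PyRTL's `optimize` and the usual `pyrtl` top-level API (Input, Output, working_block) is available:

import pyrtl

# Build a tiny design in the working block.
a = pyrtl.Input(8, 'a')
b = pyrtl.Input(8, 'b')
out = pyrtl.Output(10, 'out')
out <<= (a + b) + 0        # adding the constant 0 is work the optimizer may simplify

before = len(pyrtl.working_block().logic)
pyrtl.optimize()           # by default, optimizes the working block in place
after = len(pyrtl.working_block().logic)
print(before, '->', after)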
|
def run ( self ) :
'''Creates the actor thread which will process the channel queue
while the actor :meth:`is_alive`, making it able to receive
queries.'''
|
self . thread = Thread ( target = self . __processQueue )
self . thread . start ( )
|
def self_issued ( self ) :
""": return :
A boolean - if the certificate is self - issued , as defined by RFC
5280"""
|
if self . _self_issued is None :
self . _self_issued = self . subject == self . issuer
return self . _self_issued
|
def set_ref_b ( self , text_ref ) :
"""Set the reference values related to the str _ b compared string
: param text _ info : dict
- - author : str
- - work : str
- - subwork : str
- - text _ n : str ( a string instead of integer for variations
in numbering systems that may inlude integers and alpha
characters ( e . g . ' 101b ' ) )
: return : void"""
|
if 'author' in text_ref :
self . author_b = text_ref [ 'author' ]
if 'work' in text_ref :
self . work_b = text_ref [ 'work' ]
if 'subwork' in text_ref :
self . subwork_b = text_ref [ 'subwork' ]
if 'text_n' in text_ref :
self . text_n_b = text_ref [ 'text_n' ]
if 'language' in text_ref :
self . language_b = text_ref [ 'language' ]
return
|
def api_request ( self , method_name , params ) :
"""Execute an arbitrary method .
Args :
method _ name ( str ) : include the controller name : ' devices / search '
params ( dict ) : the method parameters
Returns :
A dict with the response
Raises :
requests . exceptions . HTTPError"""
|
url = self . _method_url ( method_name )
data = json . dumps ( params )
return self . _make_request ( url = url , method = "post" , data = data )
|
def add_content ( self , ** content ) :
"""Adds given content to the cache .
Usage : :
> > > cache = Cache ( )
> > > cache . add _ content ( John = " Doe " , Luke = " Skywalker " )
True
> > > cache
{ ' Luke ' : ' Skywalker ' , ' John ' : ' Doe ' }
: param \ * \ * content : Content to add .
: type \ * \ * content : \ * \ *
: return : Method success .
: rtype : bool"""
|
LOGGER . debug ( "> Adding '{0}' content to the cache." . format ( self . __class__ . __name__ , content ) )
self . update ( ** content )
return True
|
def bar ( it , label = "" , width = 32 , hide = None , empty_char = BAR_EMPTY_CHAR , filled_char = BAR_FILLED_CHAR , expected_size = None , every = 1 , ) :
"""Progress iterator . Wrap your iterables with it ."""
|
count = len ( it ) if expected_size is None else expected_size
with Bar ( label = label , width = width , hide = hide , empty_char = BAR_EMPTY_CHAR , filled_char = BAR_FILLED_CHAR , expected_size = count , every = every , ) as bar :
for i , item in enumerate ( it ) :
yield item
bar . show ( i + 1 )
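A hypothetical usage sketch of the progress-bar wrapper above; it assumes `bar`, the `Bar` context manager and the BAR_EMPTY_CHAR/BAR_FILLED_CHAR constants all come from the same module:

import time

# Wrap any iterable to get a progress bar while consuming it.
for item in bar(range(50), label='processing '):
    time.sleep(0.01)       # stand-in for real work

# For generators without a len(), pass expected_size explicitly.
gen = (x * x for x in range(100))
results = [v for v in bar(gen, expected_size=100, every=10)]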
|
def worse ( src_amount , src_currency , target_amount_obtained , target_currency , date = None , valid_days_max = None ) :
"""will calculate a difference between the calculated target amount and the amount you give as src _ amount
if you will obtain target _ amount _ obtained instead
valid _ days _ max : see rate ( )
returns a tuple : ( percent , difference _ src _ currency , difference _ target _ currency )"""
|
calculated = convert ( src_amount , src_currency , target = target_currency , date = date , valid_days_max = valid_days_max )
worse = calculated - target_amount_obtained
worse_src = convert ( worse , target_currency , target = src_currency , date = date , valid_days_max = valid_days_max )
if src_amount :
return worse_src / src_amount * 100.0 , worse_src , worse
elif not target_amount_obtained :
return 0.0 , worse_src , worse
else :
return float ( 'inf' ) if ( target_amount_obtained < 0 ) else float ( '-inf' ) , worse_src , worse
|
def data_iter ( batch_size , num_embed , pre_trained_word2vec = False ) :
"""Construct data iter
Parameters
batch _ size : int
num _ embed : int
pre _ trained _ word2vec : boolean
identify the pre - trained layers or not
Returns
train _ set : DataIter
Train DataIter
valid : DataIter
Valid DataIter
sentences _ size : int
array dimensions
embedded _ size : int
array dimensions
vocab _ size : int
array dimensions"""
|
print ( 'Loading data...' )
if pre_trained_word2vec :
word2vec = data_helpers . load_pretrained_word2vec ( 'data/rt.vec' )
x , y = data_helpers . load_data_with_word2vec ( word2vec )
# reshape for convolution input
x = np . reshape ( x , ( x . shape [ 0 ] , 1 , x . shape [ 1 ] , x . shape [ 2 ] ) )
embedded_size = x . shape [ - 1 ]
sentences_size = x . shape [ 2 ]
vocabulary_size = - 1
else :
x , y , vocab , vocab_inv = data_helpers . load_data ( )
embedded_size = num_embed
sentences_size = x . shape [ 1 ]
vocabulary_size = len ( vocab )
# randomly shuffle data
np . random . seed ( 10 )
shuffle_indices = np . random . permutation ( np . arange ( len ( y ) ) )
x_shuffled = x [ shuffle_indices ]
y_shuffled = y [ shuffle_indices ]
# split train / valid set
x_train , x_dev = x_shuffled [ : - 1000 ] , x_shuffled [ - 1000 : ]
y_train , y_dev = y_shuffled [ : - 1000 ] , y_shuffled [ - 1000 : ]
print ( 'Train/Valid split: %d/%d' % ( len ( y_train ) , len ( y_dev ) ) )
print ( 'train shape:' , x_train . shape )
print ( 'valid shape:' , x_dev . shape )
print ( 'sentence max words' , sentences_size )
print ( 'embedding size' , embedded_size )
print ( 'vocab size' , vocabulary_size )
train_set = mx . io . NDArrayIter ( x_train , y_train , batch_size , shuffle = True )
valid = mx . io . NDArrayIter ( x_dev , y_dev , batch_size )
return train_set , valid , sentences_size , embedded_size , vocabulary_size
|
def Latex ( formula , pos = ( 0 , 0 , 0 ) , normal = ( 0 , 0 , 1 ) , c = 'k' , s = 1 , bg = None , alpha = 1 , res = 30 , usetex = False , fromweb = False , ) :
"""Render Latex formulas .
: param str formula : latex text string
: param list pos : position coordinates in space
: param list normal : normal to the plane of the image
: param c : face color
: param bg : background color box
: param int res : dpi resolution
: param bool usetex : use latex compiler of matplotlib
: param fromweb : retrieve the latex image from online server ( codecogs )
. . hint : : | latex | | latex . py | _"""
|
try : # def _ Latex ( formula , pos , normal , c , s , bg , alpha , res , usetex , fromweb ) :
def build_img_web ( formula , tfile ) :
import requests
if c == 'k' :
ct = 'Black'
else :
ct = 'White'
wsite = 'http://latex.codecogs.com/png.latex'
try :
r = requests . get ( wsite + '?\dpi{100} \huge \color{' + ct + '} ' + formula )
f = open ( tfile , 'wb' )
f . write ( r . content )
f . close ( )
except requests . exceptions . ConnectionError :
colors . printc ( 'Latex error. Web site unavailable?' , wsite , c = 1 )
return None
def build_img_plt ( formula , tfile ) :
import matplotlib . pyplot as plt
plt . rc ( 'text' , usetex = usetex )
formula1 = '$' + formula + '$'
plt . axis ( 'off' )
col = colors . getColor ( c )
if bg :
bx = dict ( boxstyle = "square" , ec = col , fc = colors . getColor ( bg ) )
else :
bx = None
plt . text ( 0.5 , 0.5 , formula1 , size = res , color = col , alpha = alpha , ha = "center" , va = "center" , bbox = bx )
plt . savefig ( '_lateximg.png' , format = 'png' , transparent = True , bbox_inches = 'tight' , pad_inches = 0 )
plt . close ( )
if fromweb :
build_img_web ( formula , '_lateximg.png' )
else :
build_img_plt ( formula , '_lateximg.png' )
from vtkplotter . actors import ImageActor
picr = vtk . vtkPNGReader ( )
picr . SetFileName ( '_lateximg.png' )
picr . Update ( )
vactor = ImageActor ( )
vactor . SetInputData ( picr . GetOutput ( ) )
vactor . alpha ( alpha )
b = vactor . GetBounds ( )
xm , ym = ( b [ 1 ] + b [ 0 ] ) / 200 * s , ( b [ 3 ] + b [ 2 ] ) / 200 * s
vactor . SetOrigin ( - xm , - ym , 0 )
nax = np . linalg . norm ( normal )
if nax :
normal = np . array ( normal ) / nax
theta = np . arccos ( normal [ 2 ] )
phi = np . arctan2 ( normal [ 1 ] , normal [ 0 ] )
vactor . SetScale ( 0.25 / res * s , 0.25 / res * s , 0.25 / res * s )
vactor . RotateZ ( phi * 57.3 )
vactor . RotateY ( theta * 57.3 )
vactor . SetPosition ( pos )
try :
import os
os . unlink ( '_lateximg.png' )
except FileNotFoundError :
pass
return vactor
except :
colors . printc ( 'Error in Latex()\n' , formula , c = 1 )
colors . printc ( ' latex or dvipng not installed?' , c = 1 )
colors . printc ( ' Try: usetex=False' , c = 1 )
colors . printc ( ' Try: sudo apt install dvipng' , c = 1 )
return None
|
def add_local ( self , source_fpath , version = None , tags = None ) :
"""Copies a given file into local store as an instance of this dataset .
Parameters
source _ fpath : str
The full path for the source file to use .
version : str , optional
The version of the instance of this dataset .
tags : list of str , optional
The tags associated with the given instance of this dataset .
Returns
ext : str
The extension of the file added ."""
|
ext = os . path . splitext ( source_fpath ) [ 1 ]
ext = ext [ 1 : ]
# we dont need the dot
fpath = self . fpath ( version = version , tags = tags , ext = ext )
shutil . copyfile ( src = source_fpath , dst = fpath )
return ext
|
def polyz ( self , polys ) :
"""Creates a POLYGONZ shape .
Polys is a collection of polygons , each made up of a list of xyzm values .
Note that for ordinary polygons the coordinates must run in a clockwise direction .
If some of the polygons are holes , these must run in a counterclockwise direction .
If the z ( elevation ) value is not included , it defaults to 0.
If the m ( measure ) value is not included , it defaults to None ( NoData ) ."""
|
shapeType = POLYGONZ
self . _shapeparts ( parts = polys , shapeType = shapeType )
|
def get_actions ( self , params ) :
"""Send a HTTP request to the satellite ( GET / _ checks )
Get actions from the scheduler .
Un - serialize data received .
: param params : the request parameters
: type params : str
: return : Actions list on success , [ ] on failure
: rtype : list"""
|
res = self . con . get ( '_checks' , params , wait = True )
logger . debug ( "Got checks to execute from %s: %s" , self . name , res )
return unserialize ( res , True )
|
def sub_index ( self , sub , start = 0 , end = None ) :
"""Return the index of a subsequence .
This runs in O ( len ( sub ) )
Args :
sub ( Sequence ) : An Iterable to search for
Returns :
int : The index of the first element of sub
Raises :
ValueError : If sub isn ' t a subsequence
TypeError : If sub isn ' t iterable
IndexError : If start or end are out of range"""
|
start_index = self . index ( sub [ 0 ] , start , end )
end = self . _fix_end_index ( end )
if start_index + len ( sub ) > end :
raise ValueError
for i in range ( 1 , len ( sub ) ) :
if sub [ i ] != self [ start_index + i ] :
raise ValueError
return start_index
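For illustration, a simplified standalone rendering of the same lookup on a plain list (this is not the class's method; names and behaviour around _fix_end_index are approximated):

# Simplified standalone version of the subsequence lookup above.
def sub_index(seq, sub, start=0, end=None):
    end = len(seq) if end is None else end
    start_index = seq.index(sub[0], start, end)   # first occurrence of sub[0] only
    if start_index + len(sub) > end:
        raise ValueError("subsequence extends past end")
    for i in range(1, len(sub)):
        if sub[i] != seq[start_index + i]:
            raise ValueError("not a contiguous subsequence")
    return start_index

print(sub_index([3, 1, 4, 1, 5, 9], [4, 1, 5]))   # 2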
|
def print_line ( host , path , line , line_type ) :
"""Print a dump tool line to stdout .
: param host : the source host
: type host : str
: param path : the path to the file that is being analyzed
: type path : str
: param line : the line to be printed
: type line : str
: param line _ type : a header for the line
: type line _ type : str"""
|
print ( "{ltype} Host: {host}, File: {path}" . format ( ltype = line_type , host = host , path = path , ) )
print ( "{ltype} Output: {line}" . format ( ltype = line_type , line = line ) )
|
def perform_command ( self ) :
"""Perform command and return the appropriate exit code .
: rtype : int"""
|
if len ( self . actual_arguments ) < 2 :
return self . print_help ( )
source_url = self . actual_arguments [ 0 ]
output_file_path = self . actual_arguments [ 1 ]
download = not self . has_option ( "--list" )
# largest_audio = True by default or if explicitly given
if self . has_option ( "--largest-audio" ) :
largest_audio = True
else :
largest_audio = not self . has_option ( "--smallest-audio" )
download_format = self . has_option_with_value ( "--format" )
try :
if download :
self . print_info ( u"Downloading audio stream from '%s' ..." % source_url )
downloader = Downloader ( logger = self . logger )
result = downloader . audio_from_youtube ( source_url , download = True , output_file_path = output_file_path , download_format = download_format , largest_audio = largest_audio , )
self . print_info ( u"Downloading audio stream from '%s' ... done" % source_url )
self . print_success ( u"Downloaded file '%s'" % result )
else :
self . print_info ( u"Downloading stream info from '%s' ..." % source_url )
downloader = Downloader ( logger = self . logger )
result = downloader . audio_from_youtube ( source_url , download = False )
self . print_info ( u"Downloading stream info from '%s' ... done" % source_url )
msg = [ ]
msg . append ( u"%s\t%s\t%s\t%s" % ( "Format" , "Extension" , "Bitrate" , "Size" ) )
for r in result :
filesize = gf . human_readable_number ( r [ "filesize" ] )
msg . append ( u"%s\t%s\t%s\t%s" % ( r [ "format" ] , r [ "ext" ] , r [ "abr" ] , filesize ) )
self . print_generic ( u"Available audio streams:" )
self . print_generic ( u"\n" . join ( msg ) )
return self . NO_ERROR_EXIT_CODE
except ImportError :
self . print_no_dependency_error ( )
except Exception as exc :
self . print_error ( u"An unexpected error occurred while downloading audio from YouTube:" )
self . print_error ( u"%s" % exc )
return self . ERROR_EXIT_CODE
|
def add ( self , ** kwargs ) :
"""Just a shortcut to change the current url , equivalent to Url ( self , * * kwargs )"""
|
if "path" in kwargs :
path = kwargs [ "path" ]
if isinstance ( path , bytes ) :
path = String ( path )
if not path [ 0 ] . startswith ( "/" ) :
paths = self . normalize_paths ( self . path , path )
else :
paths = self . normalize_paths ( path )
kwargs [ "path" ] = "/" . join ( paths )
return self . create ( self , ** kwargs )
|
def main ( argv = sys . argv [ 1 : ] ) :
"""Parse commandline arguments and run the tool
: param argv : the commandline arguments .
: type argv : list
: returns : None
: rtype : None
: raises : None"""
|
parser = setup_argparse ( )
args = parser . parse_args ( argv )
if args . gendochelp :
sys . argv [ 0 ] = 'gendoc.py'
genparser = gendoc . setup_parser ( )
genparser . print_help ( )
sys . exit ( 0 )
print 'Preparing output directories'
print '=' * 80
for odir in args . output :
prepare_dir ( odir , not args . nodelete )
print '\nRunning gendoc'
print '=' * 80
for i , idir in enumerate ( args . input ) :
if i >= len ( args . output ) :
odir = args . output [ - 1 ]
else :
odir = args . output [ i ]
run_gendoc ( idir , odir , args . gendocargs )
|
def tag ( ) :
"""Tag current version ."""
|
if check_unstaged ( ) :
raise EnvironmentError ( 'There are staged changes, abort.' )
with open ( str ( INIT_PATH ) ) as f :
metadata = dict ( re . findall ( "__([a-z]+)__ = '([^']+)'" , f . read ( ) ) )
version = metadata [ 'version' ]
check_output ( [ 'git' , 'tag' , version , '-m' , 'Release v{}' . format ( version ) ] )
|
def is_native_ion_gate ( gate : ops . Gate ) -> bool :
"""Check if a gate is a native ion gate .
Args :
gate : Input gate .
Returns :
True if the gate is native to the ion , false otherwise ."""
|
return isinstance ( gate , ( ops . XXPowGate , ops . MeasurementGate , ops . XPowGate , ops . YPowGate , ops . ZPowGate ) )
|
def json_export ( self , dest , fieldnames = None , encoding = "UTF-8" ) :
"""Exports the contents of the table to a JSON - formatted file .
@ param dest : output file - if a string is given , the file with that name will be
opened , written , and closed ; if a file object is given , then that object
will be written as - is , and left for the caller to be closed .
@ type dest : string or file
@ param fieldnames : attribute names to be exported ; can be given as a single
string with space - delimited names , or as a list of attribute names
@ type fieldnames : list of strings
@ param encoding : string ( default = " UTF - 8 " ) ; if csv _ dest is provided as a string
representing an output filename , an encoding argument can be provided ( Python 3 only )
@ type encoding : string"""
|
close_on_exit = False
if isinstance ( dest , basestring ) :
if PY_3 :
dest = open ( dest , 'w' , encoding = encoding )
else :
dest = open ( dest , 'w' )
close_on_exit = True
try :
if isinstance ( fieldnames , basestring ) :
fieldnames = fieldnames . split ( )
if fieldnames is None :
do_all ( dest . write ( _to_json ( o ) + '\n' ) for o in self . obs )
else :
do_all ( dest . write ( json . dumps ( ODict ( ( f , getattr ( o , f ) ) for f in fieldnames ) ) + '\n' ) for o in self . obs )
finally :
if close_on_exit :
dest . close ( )
|
def get_block_from_consensus ( self , consensus_hash ) :
"""Get the block number with the given consensus hash .
Return None if there is no such block ."""
|
query = 'SELECT block_id FROM snapshots WHERE consensus_hash = ?;'
args = ( consensus_hash , )
con = self . db_open ( self . impl , self . working_dir )
rows = self . db_query_execute ( con , query , args , verbose = False )
res = None
for r in rows :
res = r [ 'block_id' ]
con . close ( )
return res
|
def cmd_ok ( cmd ) :
"""Returns True if cmd can be run ."""
|
try :
sp . check_call ( cmd , stderr = sp . PIPE , stdout = sp . PIPE )
except sp . CalledProcessError : # bwa gives return code of 1 with no argument
pass
except :
sys . stderr . write ( "{} not found, skipping\n" . format ( cmd ) )
return False
return True
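A hypothetical usage sketch (assumes `sp` is the `subprocess` module, as the function above implies, and that the tool names below are only examples):

# Check external tools before building a pipeline; skip steps whose tools are missing.
tools = ["bwa", "samtools", "definitely-not-installed"]
available = {tool: cmd_ok(tool) for tool in tools}
print(available)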
|
def _get_cassandra_config ( self ) :
"""Retrieve a dict containing Cassandra client config params ."""
|
parts = urlsplit ( os . environ . get ( 'CASSANDRA_URI' , DEFAULT_URI ) )
if parts . scheme != 'cassandra' :
raise RuntimeError ( 'CASSANDRA_URI scheme is not "cassandra://"!' )
_ , _ , ip_addresses = socket . gethostbyname_ex ( parts . hostname )
if not ip_addresses :
raise RuntimeError ( 'Unable to find Cassandra in DNS!' )
return { 'contact_points' : ip_addresses , 'port' : parts . port or DEFAULT_PORT , }
|
def __write_srgb ( self , outfile ) :
"""Write colour reference information : gamma , iccp etc .
This method should be called only from ` ` write _ idat ` ` method
or chunk order will be ruined ."""
|
if self . rendering_intent is not None and self . icc_profile is not None :
raise FormatError ( "sRGB (via rendering_intent) and iCCP could not " "be present simultaneously" )
# http : / / www . w3 . org / TR / PNG / # 11sRGB
if self . rendering_intent is not None :
write_chunk ( outfile , 'sRGB' , struct . pack ( "B" , int ( self . rendering_intent ) ) )
# http : / / www . w3 . org / TR / PNG / # 11cHRM
if ( self . white_point is not None and self . rgb_points is None ) or ( self . white_point is None and self . rgb_points is not None ) :
logging . warn ( "White and RGB points should be both specified to" " write cHRM chunk" )
self . white_point = None
self . rgb_points = None
if ( self . white_point is not None and self . rgb_points is not None ) :
data = ( self . white_point [ 0 ] , self . white_point [ 1 ] , self . rgb_points [ 0 ] [ 0 ] , self . rgb_points [ 0 ] [ 1 ] , self . rgb_points [ 1 ] [ 0 ] , self . rgb_points [ 1 ] [ 1 ] , self . rgb_points [ 2 ] [ 0 ] , self . rgb_points [ 2 ] [ 1 ] , )
write_chunk ( outfile , 'cHRM' , struct . pack ( "!8L" , * [ int ( round ( it * 1e5 ) ) for it in data ] ) )
# http : / / www . w3 . org / TR / PNG / # 11gAMA
if self . gamma is not None :
write_chunk ( outfile , 'gAMA' , struct . pack ( "!L" , int ( round ( self . gamma * 1e5 ) ) ) )
# http : / / www . w3 . org / TR / PNG / # 11iCCP
if self . icc_profile is not None :
if self . compression is None or self . compression == - 1 :
comp_level = 6
# zlib . Z _ DEFAULT _ COMPRESSION
else :
comp_level = self . compression
write_chunk ( outfile , 'iCCP' , self . icc_profile [ 0 ] + zerobyte + zerobyte + zlib . compress ( self . icc_profile [ 1 ] , comp_level ) )
|
def _initStormCmds ( self ) :
'''Registration for built - in Storm commands .'''
|
self . addStormCmd ( s_storm . MaxCmd )
self . addStormCmd ( s_storm . MinCmd )
self . addStormCmd ( s_storm . HelpCmd )
self . addStormCmd ( s_storm . IdenCmd )
self . addStormCmd ( s_storm . SpinCmd )
self . addStormCmd ( s_storm . SudoCmd )
self . addStormCmd ( s_storm . UniqCmd )
self . addStormCmd ( s_storm . CountCmd )
self . addStormCmd ( s_storm . GraphCmd )
self . addStormCmd ( s_storm . LimitCmd )
self . addStormCmd ( s_storm . SleepCmd )
self . addStormCmd ( s_storm . DelNodeCmd )
self . addStormCmd ( s_storm . MoveTagCmd )
self . addStormCmd ( s_storm . ReIndexCmd )
|
def create_host_template ( resource_root , name , cluster_name ) :
"""Create a host template .
@ param resource _ root : The root Resource object .
@ param name : Host template name
@ param cluster _ name : Cluster name
@ return : An ApiHostTemplate object for the created host template .
@ since : API v3"""
|
apitemplate = ApiHostTemplate ( resource_root , name , [ ] )
return call ( resource_root . post , HOST_TEMPLATES_PATH % ( cluster_name , ) , ApiHostTemplate , True , data = [ apitemplate ] , api_version = 3 ) [ 0 ]
|
def _tls_encrypt ( alg , p ) :
"""Provided with an already MACed TLSCompressed packet , and a stream or block
cipher alg , the function converts it into a TLSCiphertext ( i . e . encrypts it
and updates length ) . The function returns a newly created TLSCiphertext
instance ."""
|
c = TLSCiphertext ( )
c . type = p . type
c . version = p . version
c . data = alg . encrypt ( p . data )
c . len = len ( c . data )
return c
|
def muc ( clusters , mention_to_gold ) :
"""Counts the mentions in each predicted cluster which need to be re - allocated in
order for each predicted cluster to be contained by the respective gold cluster .
< http : / / aclweb . org / anthology / M / M95 / M95-1005 . pdf >"""
|
true_p , all_p = 0 , 0
for cluster in clusters :
all_p += len ( cluster ) - 1
true_p += len ( cluster )
linked = set ( )
for mention in cluster :
if mention in mention_to_gold :
linked . add ( mention_to_gold [ mention ] )
else :
true_p -= 1
true_p -= len ( linked )
return true_p , all_p
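A small worked example (the cluster contents and gold mapping below are made up for illustration; mention_to_gold maps each mention to its gold cluster id, and the returned pair is a numerator/denominator):

# Predicted clusters: one cluster of three mentions, one singleton.
clusters = [("a", "b", "c"), ("d",)]
# Gold assignment: a and b are truly together, c belongs to another gold cluster,
# d is not annotated in the gold at all.
mention_to_gold = {"a": 0, "b": 0, "c": 1}

true_p, all_p = muc(clusters, mention_to_gold)
print(true_p, all_p)     # 1 2
print(true_p / all_p)    # 0.5 for this prediction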
|
def post ( self , user_ids = None , usernames = None , status = None ) :
""": param user _ ids : list of int of the user _ ids to return
: param usernames : list of str of the usernames to return
: param status : str of the status
: return : list of User"""
|
return self . connection . post ( 'user/array' , data = dict ( user_ids = user_ids , usernames = usernames , status = status ) )
|
def get_single_score ( self , point , centroids = None , sd = None ) :
"""Get a single score is a wrapper around the result of classifying a Point against a group of centroids . Attributes :
observation _ score ( dict ) : Original received point and normalised point .
: Example :
> > > { " original " : [ 0.40369016 , 0.65217912 ] , " normalised " : [ 1.65915104 , 3.03896181 ] }
nearest _ cluster ( int ) : Index of the nearest cluster . If distances match , then lowest numbered cluster wins .
distances ( list ( float ) ) : List of distances from the Point to each cluster centroid . E . g :
> > > [ 2.38086238 , 0.12382605 , 2.0362993 , 1.43195021]
centroids ( list ( list ( float ) ) ) : A list of the current centroidswhen queried . E . g :
> > > [ [ 0.23944831 , 1.12769265 ] , [ 1.75621978 , 3.11584191 ] , [ 2.65884563 , 1.26494783 ] , [ 0.39421099 , 2.36783733 ] ]
: param point : the point to classify
: type point : pandas . DataFrame
: param centroids : the centroids
: type centroids : np . array
: param sd : the standard deviation
: type sd : np . array
: return score : the score for a given observation
: rtype score : int"""
|
normalised_point = array ( point ) / array ( sd )
observation_score = { 'original' : point , 'normalised' : normalised_point . tolist ( ) , }
distances = [ euclidean ( normalised_point , centroid ) for centroid in centroids ]
return int ( distances . index ( min ( distances ) ) )
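A standalone sketch of the same nearest-centroid scoring, assuming `array` and `euclidean` are numpy.array and scipy.spatial.distance.euclidean (the values below are arbitrary examples):

from numpy import array
from scipy.spatial.distance import euclidean

point = [0.4, 0.65]
sd = array([0.2, 0.25])                       # per-feature standard deviation
centroids = array([[1.0, 1.0], [2.0, 3.0]])

normalised = array(point) / sd                # scale each feature by its sd
distances = [euclidean(normalised, c) for c in centroids]
print(distances.index(min(distances)))        # index of the nearest centroid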
|
def update ( self , rec = None , drop = None , tables = None , install = None , materialize = None , indexes = None , joins = 0 , views = 0 ) :
"""Updates current record .
Args :
rec ( FIMRecord ) :"""
|
if not drop :
drop = [ ]
if not tables :
tables = set ( )
if not install :
install = set ( )
if not materialize :
materialize = set ( )
if not indexes :
indexes = set ( )
if rec :
self . update ( drop = rec . drop , tables = rec . tables , install = rec . install , materialize = rec . materialize , indexes = rec . indexes , joins = rec . joins )
self . drop += drop
self . tables |= set ( tables )
self . install |= set ( install )
self . materialize |= set ( materialize )
self . indexes |= set ( indexes )
self . joins += joins
self . views += views
# Joins or views promote installed partitions to materialized partitions
if self . joins > 0 or self . views > 0 :
self . materialize |= self . install
self . install = set ( )
|
def get_time_server ( ) :
'''Display the currently set network time server.
:return: the network time server
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' timezone.get_time_server'''
|
ret = salt . utils . mac_utils . execute_return_result ( 'systemsetup -getnetworktimeserver' )
return salt . utils . mac_utils . parse_return ( ret )
|
def _convert_xml_to_shares ( response ) :
'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults AccountName="https://myaccount.file.core.windows.net">
  <Prefix>string-value</Prefix>
  <Marker>string-value</Marker>
  <MaxResults>int-value</MaxResults>
  <Shares>
    <Share>
      <Name>share-name</Name>
      <Snapshot>date-time-value</Snapshot>
      <Properties>
        <Last-Modified>date/time-value</Last-Modified>
        <Etag>etag</Etag>
        <Quota>max-share-size</Quota>
      </Properties>
      <Metadata>
        <metadata-name>value</metadata-name>
      </Metadata>
    </Share>
  </Shares>
  <NextMarker>marker-value</NextMarker>
</EnumerationResults>'''
|
if response is None or response . body is None :
return None
shares = _list ( )
list_element = ETree . fromstring ( response . body )
# Set next marker
next_marker = list_element . findtext ( 'NextMarker' ) or None
setattr ( shares , 'next_marker' , next_marker )
shares_element = list_element . find ( 'Shares' )
for share_element in shares_element . findall ( 'Share' ) : # Name element
share = Share ( )
share . name = share_element . findtext ( 'Name' )
# Snapshot
share . snapshot = share_element . findtext ( 'Snapshot' )
# Metadata
metadata_root_element = share_element . find ( 'Metadata' )
if metadata_root_element is not None :
share . metadata = dict ( )
for metadata_element in metadata_root_element :
share . metadata [ metadata_element . tag ] = metadata_element . text
# Properties
properties_element = share_element . find ( 'Properties' )
share . properties . last_modified = parser . parse ( properties_element . findtext ( 'Last-Modified' ) )
share . properties . etag = properties_element . findtext ( 'Etag' )
share . properties . quota = int ( properties_element . findtext ( 'Quota' ) )
# Add share to list
shares . append ( share )
return shares
|
def send ( self , cmd = "" , timeout = 300 , wait_for_string = None , password = False ) :
"""Send the command to the device and return the output .
Args :
cmd ( str ) : Command string for execution . Defaults to empty string .
timeout ( int ) : Timeout in seconds . Defaults to 300 sec ( 5 min )
wait _ for _ string ( str ) : This is optional string that driver
waits for after command execution . If none the detected
prompt will be used .
password ( bool ) : If true cmd representing password is not logged
and condoor waits for noecho .
Returns :
A string containing the command output .
Raises :
ConnectionError : General connection error during command execution
CommandSyntaxError : Command syntax error or unknown command .
CommandTimeoutError : Timeout during command execution"""
|
return self . _chain . send ( cmd , timeout , wait_for_string , password )
|
def get_icinga_stats ( self , app_stats ) :
"""Extract metrics from ' programstatus '"""
|
stats = { }
stats = dict ( stats . items ( ) + self . _get_active_stats ( app_stats ) . items ( ) )
stats = dict ( stats . items ( ) + self . _get_cached_stats ( app_stats ) . items ( ) )
stats = dict ( stats . items ( ) + self . _get_command_execution ( app_stats ) . items ( ) )
stats = dict ( stats . items ( ) + self . _get_externalcmd_stats ( app_stats ) . items ( ) )
stats [ "uptime" ] = self . _get_uptime ( app_stats )
return stats
|
def custom_except_hook ( exc_info ) :
"""A custom excepthook to present python errors produced by the CLI .
We don ' t want to show end users big scary stacktraces if they aren ' t python
programmers , so slim it down to some basic info . We keep a " DEBUGMODE " env
variable kicking around to let us turn on stacktraces if we ever need them .
Additionally , does global suppression of EPIPE errors , which often occur
when a python command is piped to a consumer like ` head ` which closes its
input stream before python has sent all of its output .
DANGER : There is a ( small ) risk that this will bite us if there are EPIPE
errors produced within the Globus SDK . We should keep an eye on this
possibility , as it may demand more sophisticated handling of EPIPE .
Possible TODO item to reduce this risk : inspect the exception and only hide
EPIPE if it comes from within the globus _ cli package ."""
|
exception_type , exception , traceback = exc_info
# check if we ' re in debug mode , and run the real excepthook if we are
ctx = click . get_current_context ( )
state = ctx . ensure_object ( CommandState )
if state . debug :
sys . excepthook ( exception_type , exception , traceback )
# we ' re not in debug mode , do custom handling
else : # if it ' s a click exception , re - raise as original - - Click ' s main
# execution context will handle pretty - printing
if isinstance ( exception , click . ClickException ) :
reraise ( exception_type , exception , traceback )
# catch any session errors to give helpful instructions
# on how to use globus session update
elif ( isinstance ( exception , exc . GlobusAPIError ) and exception . raw_json and "authorization_parameters" in exception . raw_json ) :
session_hook ( exception )
# handle the Globus - raised errors with our special hooks
# these will present the output ( on stderr ) as JSON
elif isinstance ( exception , exc . TransferAPIError ) :
if exception . code == "ClientError.AuthenticationFailed" :
authentication_hook ( exception )
else :
transferapi_hook ( exception )
elif isinstance ( exception , exc . AuthAPIError ) :
if exception . code == "UNAUTHORIZED" :
authentication_hook ( exception )
# invalid _ grant occurs when the users refresh tokens are not valid
elif exception . message == "invalid_grant" :
invalidrefresh_hook ( exception )
else :
authapi_hook ( exception )
elif isinstance ( exception , exc . GlobusAPIError ) :
globusapi_hook ( exception )
# specific checks fell through - - now check if it ' s any kind of
# GlobusError
elif isinstance ( exception , exc . GlobusError ) :
globus_generic_hook ( exception )
# not a GlobusError , not a ClickException - - something like ValueError
# or NotImplementedError bubbled all the way up here : just print it
# out , basically
else :
safeprint ( u"{}: {}" . format ( exception_type . __name__ , exception ) )
sys . exit ( 1 )
|
def mission_count_send ( self , target_system , target_component , count , force_mavlink1 = False ) :
'''This message is emitted as response to MISSION_REQUEST_LIST by the MAV
and to initiate a write transaction. The GCS can then
request the individual mission item based on the
knowledge of the total number of MISSIONs.
target_system: System ID (uint8_t)
target_component: Component ID (uint8_t)
count: Number of mission items in the sequence (uint16_t)'''
|
return self . send ( self . mission_count_encode ( target_system , target_component , count ) , force_mavlink1 = force_mavlink1 )
|
def normalize_pred_string ( predstr ) :
"""Normalize the predicate string * predstr * to a conventional form .
This makes predicate strings more consistent by removing quotes and
the ` _ rel ` suffix , and by lowercasing them .
Examples :
> > > normalize _ pred _ string ( ' " _ dog _ n _ 1 _ rel " ' )
' _ dog _ n _ 1'
> > > normalize _ pred _ string ( ' _ dog _ n _ 1 ' )
' _ dog _ n _ 1'"""
|
tokens = [ t for t in split_pred_string ( predstr ) [ : 3 ] if t is not None ]
if predstr . lstrip ( '\'"' ) [ : 1 ] == '_' :
tokens = [ '' ] + tokens
return '_' . join ( tokens ) . lower ( )
|
def _groupby_new_state ( index , outputs , decisions ) :
"""Groups the simulants in the index by their new output state .
Parameters
index : iterable of ints
An iterable of integer labels for the simulants .
outputs : iterable
A list of possible output states .
decisions : ` pandas . Series `
A series containing the name of the next state for each simulant in the index .
Returns
iterable of 2 - tuples
The first item in each tuple is the name of an output state and the second item
is a ` pandas . Index ` representing the simulants to transition into that state ."""
|
output_map = { o : i for i , o in enumerate ( outputs ) }
groups = pd . Series ( index ) . groupby ( [ output_map [ d ] for d in decisions ] )
results = [ ( outputs [ i ] , pd . Index ( sub_group . values ) ) for i , sub_group in groups ]
selected_outputs = [ o for o , _ in results ]
for output in outputs :
if output not in selected_outputs :
results . append ( ( output , pd . Index ( [ ] ) ) )
return results
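A hedged usage sketch with made-up states and decisions (only pandas is assumed, matching the function above):

import pandas as pd

index = [0, 1, 2, 3]
outputs = ["healthy", "sick", "dead"]
decisions = pd.Series(["sick", "healthy", "sick", "healthy"], index=index)

for state, idx in _groupby_new_state(index, outputs, decisions):
    print(state, list(idx))
# healthy [1, 3]
# sick [0, 2]
# dead []        <- states nobody transitioned into still appear, with an empty Index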
|
def run_custom_config_command ( self , custom_command ) :
"""Execute custom command in configuration mode on device
: param custom _ command : command
: return : result of command execution"""
|
return self . run_command_flow . execute_flow ( custom_command = custom_command , is_config = True )
|
def job_ids ( log_stream ) :
"""Grep out all lines with scancel example ."""
|
id_rows = [ line for line in log_stream if 'scancel' in line ]
jobs = [ id_row . strip ( ) [ - 7 : - 1 ] for id_row in id_rows ]
return jobs
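A hypothetical example; the log lines below are invented, and the [-7:-1] slice above assumes a six-character job id followed by exactly one trailing character on each matching line:

log_stream = [
    "INFO starting workflow",
    "INFO to abort, run: scancel 123456.",
    "INFO unrelated line",
]
print(job_ids(log_stream))   # ['123456']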
|
def get_url_and_bitrate ( self , song_title ) :
"""根据歌名搜索320k地址"""
|
song_id , bitrate = self . get_song_id ( song_title )
url = 'http://m1.music.126.net/'
if song_id :
url += self . encrypted_id ( song_id ) + '/' + str ( song_id ) + '.mp3'
bitrate = str ( bitrate / 1000 )
return url , bitrate
else :
return None , None
|
def _trigger_refresh ( self , key ) :
"Triggers update to a plot on a refresh event"
|
if self . top_level :
self . update ( key )
else :
self . current_key = None
self . current_frame = None
|
def get_callable_from_line ( self , module_file , lineno ) :
"""Get the callable that the line number belongs to ."""
|
module_name = _get_module_name_from_fname ( module_file )
if module_name not in self . _modules_dict :
self . trace ( [ module_file ] )
ret = None
# Sort callables by starting line number
iobj = sorted ( self . _modules_dict [ module_name ] , key = lambda x : x [ "code_id" ] [ 1 ] )
for value in iobj :
if value [ "code_id" ] [ 1 ] <= lineno <= value [ "last_lineno" ] :
ret = value [ "name" ]
elif value [ "code_id" ] [ 1 ] > lineno :
break
return ret if ret else module_name
|
def add_key_filters ( self , key_filters ) :
"""Adds key filters to the inputs .
: param key _ filters : a list of filters
: type key _ filters : list
: rtype : : class : ` RiakMapReduce `"""
|
if self . _input_mode == 'query' :
raise ValueError ( 'Key filters are not supported in a query.' )
self . _key_filters . extend ( key_filters )
return self
|
def map2cube ( data_map , layout ) :
r"""Map to cube
This method transforms the input data from a 2D map with given layout to
a 3D cube
Parameters
data _ map : np . ndarray
Input data map , 2D array
layout : tuple
2D layout of 2D images
Returns
np . ndarray 3D cube
Raises
ValueError
For invalid layout
Examples
> > > from modopt . base . transform import map2cube
> > > a = np . array ( [ [ 0 , 1 , 4 , 5 ] , [ 2 , 3 , 6 , 7 ] , [ 8 , 9 , 12 , 13 ] ,
[10 , 11 , 14 , 15 ] ] )
> > > map2cube ( a , ( 2 , 2 ) )
array ( [ [ [ 0 , 1 ] ,
[ 2 , 3 ] ] ,
[ [ 4 , 5 ] ,
[ 6 , 7 ] ] ,
[ [ 8 , 9 ] ,
[10 , 11 ] ] ,
[ [ 12 , 13 ] ,
[14 , 15 ] ] ] )"""
|
if np . all ( np . array ( data_map . shape ) % np . array ( layout ) ) :
raise ValueError ( 'The desired layout must be a multiple of the number ' 'pixels in the data map.' )
d_shape = np . array ( data_map . shape ) // np . array ( layout )
return np . array ( [ data_map [ ( slice ( i * d_shape [ 0 ] , ( i + 1 ) * d_shape [ 0 ] ) , slice ( j * d_shape [ 1 ] , ( j + 1 ) * d_shape [ 1 ] ) ) ] for i in range ( layout [ 0 ] ) for j in range ( layout [ 1 ] ) ] )
|
def _url_builder ( url_root , api_key , path , params ) :
"""Helper funcation to build a parameterized url ."""
|
params [ 'api_key' ] = api_key
url_end = urlencode ( params )
url = "%s%s%s" % ( url_root , path , url_end )
return url
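A hypothetical usage sketch (root, path and parameters below are invented; urlencode is assumed to be urllib.parse.urlencode, matching the call above). Note the helper concatenates root + path + query directly, so the path must already end with '?':

from urllib.parse import urlencode

url = _url_builder("https://api.example.com", "SECRET", "/v1/items?",
                   {"page": 2, "per_page": 50})
print(url)   # https://api.example.com/v1/items?page=2&per_page=50&api_key=SECRET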
|
def cloneInto ( self , newStore , avatars ) :
"""Create a copy of this LoginAccount and all associated LoginMethods in a different Store .
Return the copied LoginAccount ."""
|
la = LoginAccount ( store = newStore , password = self . password , avatars = avatars , disabled = self . disabled )
for siteMethod in self . store . query ( LoginMethod , LoginMethod . account == self ) :
LoginMethod ( store = newStore , localpart = siteMethod . localpart , domain = siteMethod . domain , internal = siteMethod . internal , protocol = siteMethod . protocol , verified = siteMethod . verified , account = la )
return la
|
def iso_payment_reference_validator ( v : str ) :
"""Validates ISO reference number checksum .
: param v : Reference number"""
|
num = ''
v = STRIP_WHITESPACE . sub ( '' , v )
for ch in v [ 4 : ] + v [ 0 : 4 ] :
x = ord ( ch )
if ord ( '0' ) <= x <= ord ( '9' ) :
num += ch
else :
x -= 55
if x < 10 or x > 35 :
raise ValidationError ( _ ( 'Invalid payment reference: {}' ) . format ( v ) )
num += str ( x )
res = Decimal ( num ) % Decimal ( '97' )
if res != Decimal ( '1' ) :
raise ValidationError ( _ ( 'Invalid payment reference: {}' ) . format ( v ) )
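A hedged illustration of the ISO 11649 check the validator implements: move the leading 'RF' plus check digits to the end, map letters to numbers (A=10 ... Z=35), and the result modulo 97 must equal 1. The sketch builds a valid reference by computing the check digits first, so no specific reference number is asserted; the base value and the _to_digits helper are hypothetical:

# Build an RF creditor reference for an arbitrary base reference, then validate it.
base = "539007547034"                        # hypothetical invoice reference

def _to_digits(s):
    # Map uppercase letters to two-digit numbers (A=10 ... Z=35), keep digits as-is.
    return ''.join(str(ord(ch) - 55) if ch.isalpha() else ch for ch in s)

# Check digits: 98 - (numeric(base + "RF" + "00") mod 97)
check = 98 - int(_to_digits(base + "RF00")) % 97
reference = "RF{:02d}{}".format(check, base)

iso_payment_reference_validator(reference)   # passes silently (mod 97 == 1)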
|
def get_default_view_path ( resource ) :
"Returns the dotted path to the default view class ."
|
parts = [ a . member_name for a in resource . ancestors ] + [ resource . collection_name or resource . member_name ]
if resource . prefix :
parts . insert ( - 1 , resource . prefix )
view_file = '%s' % '_' . join ( parts )
view = '%s:%sView' % ( view_file , snake2camel ( view_file ) )
app_package_name = get_app_package_name ( resource . config )
return '%s.views.%s' % ( app_package_name , view )
|
def body ( self , data ) :
"""Set the POST / PUT body content for this request ."""
|
if data is not None :
self . _body = data
self . add_header ( 'Content-Length' , str ( len ( self . _body ) ) )
|
def register_prefix ( self , nspair ) :
"""Register with ElementTree a set of namespaces
: param nspair : A dictionary of prefixes and uris to use when
constructing the text representation .
: return :"""
|
for prefix , uri in nspair . items ( ) :
try :
ElementTree . register_namespace ( prefix , uri )
except AttributeError : # Backwards compatibility with ET < 1.3
ElementTree . _namespace_map [ uri ] = prefix
except ValueError :
pass
|
async def AddSubnets ( self , subnets ) :
'''subnets : typing.Sequence[~AddSubnetParams]
Returns -> typing.Sequence[~ErrorResult]'''
|
# map input types to rpc msg
_params = dict ( )
msg = dict ( type = 'Subnets' , request = 'AddSubnets' , version = 2 , params = _params )
_params [ 'subnets' ] = subnets
reply = await self . rpc ( msg )
return reply
|
def create_color_scheme ( background = None , foreground = None , error = None , custom = None , red = None , green = None , yellow = None , blue = None , magenta = None , cyan = None ) :
"""Utility function that creates a color scheme instance , with default values .
The default colors are chosen based on the current palette .
: param background : background color
: param foreground : foreground color
: param error : color of error messages ( stderr )
: param custom : color of custom messages ( e . g . to print the full command or the process exit code )
: param red : value of the red ANSI color
: param green : value of the green ANSI color
: param yellow : value of the yellow ANSI color
: param blue : value of the blue ANSI color
: param magenta : value of the magenta ANSI color
: param cyan : value of the cyan ANSI color
: return : A ColorScheme instance ."""
|
if background is None :
background = qApp . palette ( ) . base ( ) . color ( )
if foreground is None :
foreground = qApp . palette ( ) . text ( ) . color ( )
is_light = background . lightness ( ) >= 128
if error is None :
if is_light :
error = QColor ( 'dark red' )
else :
error = QColor ( '#FF5555' )
if red is None :
red = QColor ( error )
if green is None :
if is_light :
green = QColor ( 'dark green' )
else :
green = QColor ( '#55FF55' )
if yellow is None :
if is_light :
yellow = QColor ( '#aaaa00' )
else :
yellow = QColor ( '#FFFF55' )
if blue is None :
if is_light :
blue = QColor ( 'dark blue' )
else :
blue = QColor ( '#5555FF' )
if magenta is None :
if is_light :
magenta = QColor ( 'dark magenta' )
else :
magenta = QColor ( '#FF55FF' )
if cyan is None :
if is_light :
cyan = QColor ( 'dark cyan' )
else :
cyan = QColor ( '#55FFFF' )
if custom is None :
custom = QColor ( 'orange' )
return OutputWindow . ColorScheme ( background , foreground , error , custom , red , green , yellow , blue , magenta , cyan )
|
def dev_moments ( self ) :
"""Sum of the absolute deviations between the central moments of the
instantaneous unit hydrograph and the ARMA approximation ."""
|
return numpy . sum ( numpy . abs ( self . moments - self . ma . moments ) )
|
def create_manager ( self , yosai , settings , session_attributes ) :
"""Order of execution matters . The sac must be set before the cache _ handler is
instantiated so that the cache _ handler ' s serialization manager instance
registers the sac ."""
|
mgr_settings = SecurityManagerSettings ( settings )
attributes = mgr_settings . attributes
realms = self . _init_realms ( settings , attributes [ 'realms' ] )
session_attributes = self . _init_session_attributes ( session_attributes , attributes )
serialization_manager = SerializationManager ( session_attributes , serializer_scheme = attributes [ 'serializer' ] )
# the cache _ handler doesn ' t initialize a cache _ realm until it gets
# a serialization manager , which is assigned within the SecurityManager
cache_handler = self . _init_cache_handler ( settings , attributes [ 'cache_handler' ] , serialization_manager )
manager = mgr_settings . security_manager ( yosai , settings , realms = realms , cache_handler = cache_handler , serialization_manager = serialization_manager )
return manager
|
def path_lookup ( data_obj , xj_path , create_dict_path = False ) :
"""Looks up a xj path in the data _ obj .
: param dict | list data _ obj : An object to look into .
: param str xj _ path : A path to extract data from .
: param bool create _ dict _ path : Create an element if type is specified .
: return : A tuple whose first element is the extracted value and whose
second element is a boolean telling whether the value was found .
|
if not xj_path or xj_path == '.' :
return data_obj , True
res = list ( split ( xj_path , '.' , maxsplit = 1 ) )
top_key = res [ 0 ]
leftover = res [ 1 ] if len ( res ) > 1 else None
if top_key == '*' :
return _full_sub_array ( data_obj , leftover , create_dict_path )
elif top_key . startswith ( '@' ) :
return _single_array_element ( data_obj , leftover , top_key , create_dict_path )
else :
val_type , top_key = _clean_key_type ( top_key )
top_key = unescape ( top_key )
if top_key in data_obj :
value = data_obj [ top_key ]
if val_type is not None and not isinstance ( value , val_type ) :
raise XJPathError ( 'Key %s expects type "%s", but found value type is "%s"' % ( top_key , val_type . __name__ , type ( value ) . __name__ ) )
if leftover :
return path_lookup ( value , leftover , create_dict_path )
else :
return value , True
else :
if val_type is not None :
if not isinstance ( data_obj , dict ) :
raise XJPathError ( 'Accessed object must be a dict type ' 'for the key: "%s"' % top_key )
if create_dict_path :
data_obj [ top_key ] = val_type ( )
else :
return None , False
if leftover :
return path_lookup ( data_obj [ top_key ] , leftover , create_dict_path )
else :
return data_obj [ top_key ] , True
return None , False
|
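A usage sketch following the syntax visible in the code ('.'-separated keys, '*' for every element, '@'-prefixed keys for single array positions):

data = {'a': {'b': {'c': 42}}}
path_lookup(data, 'a.b.c')        # -> (42, True)
path_lookup(data, 'a.b.missing')  # -> (None, False)
path_lookup(data, '.')            # -> (data, True)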
def apply_with ( self , _ , val , ctx ) :
"""constructor
: param val : things used to construct uuid
: type val : uuid as bytes , string , or uuid . UUID"""
|
if isinstance ( val , uuid . UUID ) :
self . v = val
elif isinstance ( val , six . string_types ) :
try :
self . v = uuid . UUID ( val )
except ValueError :
self . v = uuid . UUID ( bytes = val )
elif isinstance ( val , six . binary_type ) : # TODO : how to support bytes _ le ?
self . v = uuid . UUID ( bytes = val )
else :
raise ValueError ( 'Unrecognized type for UUID: ' + str ( type ( val ) ) )
|
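The three accepted input forms, sketched with the standard uuid module; field is a hypothetical instance of the class above:

import uuid

field.apply_with(None, uuid.uuid4(), None)                            # uuid.UUID instance
field.apply_with(None, '12345678-1234-5678-1234-567812345678', None)  # canonical string
field.apply_with(None, uuid.uuid4().bytes, None)                      # 16 raw bytes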
def _build_authorization_request_url ( self , response_type , redirect_url , state = None ) :
"""Form URL to request an auth code or access token .
Parameters
response _ type ( str )
Either ' code ' ( Authorization Code Grant ) or
' token ' ( Implicit Grant )
redirect _ url ( str )
The URL that the Uber server will redirect the user to after
finishing authorization . The redirect must be HTTPS - based and
match the URL you registered your application with . Localhost
URLs are permitted and can be either HTTP or HTTPS .
state ( str )
Optional CSRF State token to send to server .
Returns
( str )
The fully constructed authorization request URL .
Raises
UberIllegalState ( ApiError )
Raised if response _ type parameter is invalid ."""
|
if response_type not in auth . VALID_RESPONSE_TYPES :
message = '{} is not a valid response type.'
raise UberIllegalState ( message . format ( response_type ) )
args = OrderedDict ( [ ( 'scope' , ' ' . join ( self . scopes ) ) , ( 'state' , state ) , ( 'redirect_uri' , redirect_url ) , ( 'response_type' , response_type ) , ( 'client_id' , self . client_id ) , ] )
return build_url ( auth . AUTH_HOST , auth . AUTHORIZE_PATH , args )
|
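A sketch of requesting an Authorization Code Grant URL; the session object, redirect URL and CSRF token are illustrative, and the exact host and path come from the auth module constants:

url = session._build_authorization_request_url(
    response_type='code',
    redirect_url='https://example.com/uber/callback',
    state='opaque-csrf-token',
)
# -> https://<AUTH_HOST>/<AUTHORIZE_PATH>?scope=...&state=...&redirect_uri=...&response_type=code&client_id=...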
def mtime ( self , key ) :
"""Return the last modification time for the cache record with key .
May be useful for cache instances where the stored values can get
' stale ' , such as caching file or network resource contents ."""
|
if key not in self . __dict :
raise CacheKeyError ( key )
else :
node = self . __dict [ key ]
return node . mtime
|
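A staleness-check sketch, assuming the cache stores file contents and supports item assignment:

import os

if os.path.getmtime(path) > cache.mtime(key):
    with open(path) as fh:
        cache[key] = fh.read()  # refresh the stale entry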
async def message_fields ( self , msg , fields , obj = None ) :
"""Load / dump individual message fields
: param msg :
: param fields :
: param obj :
: return :"""
|
for field in fields :
await self . message_field ( msg , field , obj )
return msg
|
def check_coordinate_types ( self , ds ) :
'''Check the axis attribute of coordinate variables
CF § 4 The attribute axis may be attached to a coordinate variable and
given one of the values X , Y , Z or T which stand for a longitude ,
latitude , vertical , or time axis respectively . Alternatively the
standard _ name attribute may be used for direct identification .
: param netCDF4 . Dataset ds : An open netCDF dataset
: rtype : list
: return : List of results'''
|
ret_val = [ ]
for variable in ds . get_variables_by_attributes ( axis = lambda x : x is not None ) :
name = variable . name
# Coordinate compressions should not be checked as a valid
# coordinate , which they are not . They are a mechanism to project
# an array of indices onto a 2 - d grid containing valid coordinates .
if cfutil . is_compression_coordinate ( ds , name ) :
continue
variable = ds . variables [ name ]
# Even though it ' s not allowed in CF 1.6 , it is allowed in CF 1.7
# and we see people do it , often .
if hasattr ( variable , 'cf_role' ) :
continue
# §6.1 allows for labels to be referenced as auxiliary coordinate
# variables , which should not be checked like the rest of the
# coordinates .
if variable . dtype . char == 'S' :
continue
axis = getattr ( variable , 'axis' , None )
if axis is not None :
valid_axis = self . _check_axis ( ds , name )
ret_val . append ( valid_axis )
return ret_val
|
async def add_machine ( self , spec = None , constraints = None , disks = None , series = None ) :
"""Start a new , empty machine and optionally a container , or add a
container to a machine .
: param str spec : Machine specification
Examples : :
( None ) - starts a new machine
' lxd ' - starts a new machine with one lxd container
' lxd : 4 ' - starts a new lxd container on machine 4
' ssh : user @ 10.10.0.3 : / path / to / private / key ' - manually provision
a machine with ssh and the private key used for authentication
' zone = us - east - 1a ' - starts a machine in zone us - east - 1a on AWS
' maas2 . name ' - acquire machine maas2 . name on MAAS
: param dict constraints : Machine constraints , which can contain
the following keys : :
arch : str
container : str
cores : int
cpu _ power : int
instance _ type : str
mem : int
root _ disk : int
spaces : list ( str )
tags : list ( str )
virt _ type : str
Example : :
constraints = {
' mem ' : 256 * MB ,
' tags ' : [ ' virtual ' ] ,
}
: param list disks : List of disk constraint dictionaries , which can
contain the following keys : :
count : int
pool : str
size : int
Example : :
disks = [ {
' pool ' : ' rootfs ' ,
' size ' : 10 * GB ,
' count ' : 1,
} ]
: param str series : Series , e . g . ' xenial '
Supported container types are : lxd , kvm
When deploying a container to an existing machine , constraints cannot
be used ."""
|
params = client . AddMachineParams ( )
if spec :
if spec . startswith ( "ssh:" ) :
placement , target , private_key_path = spec . split ( ":" )
user , host = target . split ( "@" )
sshProvisioner = provisioner . SSHProvisioner ( host = host , user = user , private_key_path = private_key_path , )
params = sshProvisioner . provision_machine ( )
else :
placement = parse_placement ( spec )
if placement :
params . placement = placement [ 0 ]
params . jobs = [ 'JobHostUnits' ]
if constraints :
params . constraints = client . Value . from_json ( constraints )
if disks :
params . disks = [ client . Constraints . from_json ( o ) for o in disks ]
if series :
params . series = series
# Submit the request .
client_facade = client . ClientFacade . from_connection ( self . connection ( ) )
results = await client_facade . AddMachines ( [ params ] )
error = results . machines [ 0 ] . error
if error :
raise ValueError ( "Error adding machine: %s" % error . message )
machine_id = results . machines [ 0 ] . machine
if spec :
if spec . startswith ( "ssh:" ) : # Need to run this after AddMachines has been called ,
# as we need the machine _ id
await sshProvisioner . install_agent ( self . connection ( ) , params . nonce , machine_id , )
log . debug ( 'Added new machine %s' , machine_id )
return await self . _wait_for_new ( 'machine' , machine_id )
|
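A usage sketch against an already-connected model; the constraint values, series and container placement are illustrative:

MB = 1024 * 1024
machine = await model.add_machine(
    constraints={'mem': 256 * MB, 'tags': ['virtual']},
    series='xenial',
)
container = await model.add_machine('lxd:%s' % machine.id)  # assumes Machine exposes an id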
def set_position ( self , x , y , width , height ) :
"""Set window top - left corner position and size"""
|
SetWindowPos ( self . _hwnd , None , x , y , width , height , ctypes . c_uint ( 0 ) )
|
def cmd_rccal ( self , args ) :
'''start / stop RC calibration'''
|
if len ( args ) < 1 :
self . print_cal_usage ( )
return
if ( args [ 0 ] == "start" ) :
if len ( args ) > 1 :
self . num_channels = int ( args [ 1 ] )
print ( "Calibrating %u channels" % self . num_channels )
print ( "WARNING: remove propellers from electric planes!!" )
print ( "Push return when ready to calibrate." )
raw_input ( )
self . clear_rc_cal ( )
self . calibrating = True
elif ( args [ 0 ] == "done" ) :
self . calibrating = False
self . apply_rc_cal ( )
else :
self . print_cal_usage ( )
|
def get_cache_data ( request ) :
if 'init' in request . POST :
init = bool ( float ( request . POST [ 'init' ] ) )
else :
init = False
active_variables = [ ]
if 'variables[]' in request . POST :
active_variables = request . POST . getlist ( 'variables[]' )
"""else :
active _ variables = list (
GroupDisplayPermission . objects . filter ( hmi _ group _ _ in = request . user . groups . iterator ( ) ) . values _ list (
' charts _ _ variables ' , flat = True ) )
active _ variables + = list (
GroupDisplayPermission . objects . filter ( hmi _ group _ _ in = request . user . groups . iterator ( ) ) . values _ list (
' xy _ charts _ _ variables ' , flat = True ) )
active _ variables + = list (
GroupDisplayPermission . objects . filter ( hmi _ group _ _ in = request . user . groups . iterator ( ) ) . values _ list (
' control _ items _ _ variable ' , flat = True ) )
active _ variables + = list (
GroupDisplayPermission . objects . filter ( hmi _ group _ _ in = request . user . groups . iterator ( ) ) . values _ list (
' custom _ html _ panels _ _ variables ' , flat = True ) )
active _ variables = list ( set ( active _ variables ) )"""
|
active_variable_properties = [ ]
if 'variable_properties[]' in request . POST :
active_variable_properties = request . POST . getlist ( 'variable_properties[]' )
timestamp_from = time . time ( )
if 'timestamp_from' in request . POST :
timestamp_from = float ( request . POST [ 'timestamp_from' ] ) / 1000.0
timestamp_to = time . time ( )
if 'timestamp_to' in request . POST :
timestamp_to = min ( timestamp_to , float ( request . POST [ 'timestamp_to' ] ) / 1000.0 )
if timestamp_to == 0 :
timestamp_to = time . time ( )
if timestamp_from == 0 :
timestamp_from = time . time ( ) - 60
if timestamp_to - timestamp_from > 120 * 60 :
timestamp_from = timestamp_to - 120 * 60
# if not init :
# timestamp _ to = min ( timestamp _ from + 30 , timestamp _ to )
if len ( active_variables ) > 0 :
data = RecordedData . objects . db_data ( variable_ids = active_variables , time_min = timestamp_from , time_max = timestamp_to , time_in_ms = True , query_first_value = init )
else :
data = None
if data is None :
data = { }
data [ 'variable_properties' ] = { }
for item in VariableProperty . objects . filter ( pk__in = active_variable_properties ) :
data [ 'variable_properties' ] [ item . pk ] = item . value ( )
data [ "server_time" ] = time . time ( ) * 1000
return HttpResponse ( json . dumps ( data ) , content_type = 'application/json' )
|
def handle_refundtransfer ( mediator_state : MediatorTransferState , mediator_state_change : ReceiveTransferRefund , channelidentifiers_to_channels : ChannelMap , nodeaddresses_to_networkstates : NodeNetworkStateMap , pseudo_random_generator : random . Random , block_number : BlockNumber , ) -> TransitionResult [ MediatorTransferState ] :
"""Validate and handle a ReceiveTransferRefund mediator _ state change .
A node might participate in a mediated transfer more than once because of
refund transfers , e . g . A - B - C - B - D - T : B tried to mediate the transfer through
C , which didn ' t have an available route to proceed and refunded B ; at this
point B is part of the path again and will try a new partner to proceed
with the mediation through D , and D finally reaches the target T .
In the above scenario B has two pairs of payer and payee transfers :
payer : A payee : C from the first SendLockedTransfer
payer : C payee : D from the following SendRefundTransfer
Args :
mediator _ state ( MediatorTransferState ) : Current mediator _ state .
mediator _ state _ change ( ReceiveTransferRefund ) : The mediator _ state change .
Returns :
TransitionResult : The resulting iteration ."""
|
events : List [ Event ] = list ( )
if mediator_state . secret is None : # The last sent transfer is the only one that may be refunded , all the
# previous ones are refunded already .
transfer_pair = mediator_state . transfers_pair [ - 1 ]
payee_transfer = transfer_pair . payee_transfer
payer_transfer = mediator_state_change . transfer
channel_identifier = payer_transfer . balance_proof . channel_identifier
payer_channel = channelidentifiers_to_channels . get ( channel_identifier )
if not payer_channel :
return TransitionResult ( mediator_state , list ( ) )
is_valid , channel_events , _ = channel . handle_refundtransfer ( received_transfer = payee_transfer , channel_state = payer_channel , refund = mediator_state_change , )
if not is_valid :
return TransitionResult ( mediator_state , channel_events )
iteration = mediate_transfer ( mediator_state , mediator_state_change . routes , payer_channel , channelidentifiers_to_channels , nodeaddresses_to_networkstates , pseudo_random_generator , payer_transfer , block_number , )
events . extend ( channel_events )
events . extend ( iteration . events )
iteration = TransitionResult ( mediator_state , events )
return iteration
|
def inverse ( self ) :
"""The inverse of this transform ."""
|
if self . _inverse is None :
self . _inverse = InverseTransform ( self )
return self . _inverse
|
def _apply_cluster_dict ( cluster_spec , cluster_dict , vsan_spec = None , vsan_61 = True ) :
'''Applies the values of cluster _ dict dictionary to a cluster spec
( vim . ClusterConfigSpecEx ) .
All vsan values ( cluster _ dict [ ' vsan ' ] ) will be applied to
vsan _ spec ( vim . vsan . cluster . ConfigInfoEx ) . Can be omitted
if not required .
VSAN 6.1 config needs to be applied differently than the post VSAN 6.1 way .
The type of configuration desired is dictated by the flag vsan _ 61.'''
|
log . trace ( 'Applying cluster dict %s' , cluster_dict )
if cluster_dict . get ( 'ha' ) :
ha_dict = cluster_dict [ 'ha' ]
if not cluster_spec . dasConfig :
cluster_spec . dasConfig = vim . ClusterDasConfigInfo ( )
das_config = cluster_spec . dasConfig
if 'enabled' in ha_dict :
das_config . enabled = ha_dict [ 'enabled' ]
if ha_dict [ 'enabled' ] : # Default values when ha is enabled
das_config . failoverLevel = 1
if 'admission_control_enabled' in ha_dict :
das_config . admissionControlEnabled = ha_dict [ 'admission_control_enabled' ]
if 'admission_control_policy' in ha_dict :
adm_pol_dict = ha_dict [ 'admission_control_policy' ]
if not das_config . admissionControlPolicy or not isinstance ( das_config . admissionControlPolicy , vim . ClusterFailoverResourcesAdmissionControlPolicy ) :
das_config . admissionControlPolicy = vim . ClusterFailoverResourcesAdmissionControlPolicy ( cpuFailoverResourcesPercent = adm_pol_dict [ 'cpu_failover_percent' ] , memoryFailoverResourcesPercent = adm_pol_dict [ 'memory_failover_percent' ] )
if 'default_vm_settings' in ha_dict :
vm_set_dict = ha_dict [ 'default_vm_settings' ]
if not das_config . defaultVmSettings :
das_config . defaultVmSettings = vim . ClusterDasVmSettings ( )
if 'isolation_response' in vm_set_dict :
das_config . defaultVmSettings . isolationResponse = vm_set_dict [ 'isolation_response' ]
if 'restart_priority' in vm_set_dict :
das_config . defaultVmSettings . restartPriority = vm_set_dict [ 'restart_priority' ]
if 'hb_ds_candidate_policy' in ha_dict :
das_config . hBDatastoreCandidatePolicy = ha_dict [ 'hb_ds_candidate_policy' ]
if 'host_monitoring' in ha_dict :
das_config . hostMonitoring = ha_dict [ 'host_monitoring' ]
if 'options' in ha_dict :
das_config . option = [ ]
for opt_dict in ha_dict [ 'options' ] :
das_config . option . append ( vim . OptionValue ( key = opt_dict [ 'key' ] ) )
if 'value' in opt_dict :
das_config . option [ - 1 ] . value = opt_dict [ 'value' ]
if 'vm_monitoring' in ha_dict :
das_config . vmMonitoring = ha_dict [ 'vm_monitoring' ]
cluster_spec . dasConfig = das_config
if cluster_dict . get ( 'drs' ) :
drs_dict = cluster_dict [ 'drs' ]
drs_config = vim . ClusterDrsConfigInfo ( )
if 'enabled' in drs_dict :
drs_config . enabled = drs_dict [ 'enabled' ]
if 'vmotion_rate' in drs_dict :
drs_config . vmotionRate = 6 - drs_dict [ 'vmotion_rate' ]
if 'default_vm_behavior' in drs_dict :
drs_config . defaultVmBehavior = vim . DrsBehavior ( drs_dict [ 'default_vm_behavior' ] )
cluster_spec . drsConfig = drs_config
if cluster_dict . get ( 'vm_swap_placement' ) :
cluster_spec . vmSwapPlacement = cluster_dict [ 'vm_swap_placement' ]
if cluster_dict . get ( 'vsan' ) :
vsan_dict = cluster_dict [ 'vsan' ]
if not vsan_61 : # VSAN is 6.2 and above
if 'enabled' in vsan_dict :
if not vsan_spec . vsanClusterConfig :
vsan_spec . vsanClusterConfig = vim . vsan . cluster . ConfigInfo ( )
vsan_spec . vsanClusterConfig . enabled = vsan_dict [ 'enabled' ]
if 'auto_claim_storage' in vsan_dict :
if not vsan_spec . vsanClusterConfig :
vsan_spec . vsanClusterConfig = vim . vsan . cluster . ConfigInfo ( )
if not vsan_spec . vsanClusterConfig . defaultConfig :
vsan_spec . vsanClusterConfig . defaultConfig = vim . VsanClusterConfigInfoHostDefaultInfo ( )
elif vsan_spec . vsanClusterConfig . defaultConfig . uuid : # If this remains set it causes an error
vsan_spec . vsanClusterConfig . defaultConfig . uuid = None
vsan_spec . vsanClusterConfig . defaultConfig . autoClaimStorage = vsan_dict [ 'auto_claim_storage' ]
if 'compression_enabled' in vsan_dict :
if not vsan_spec . dataEfficiencyConfig :
vsan_spec . dataEfficiencyConfig = vim . vsan . DataEfficiencyConfig ( )
vsan_spec . dataEfficiencyConfig . compressionEnabled = vsan_dict [ 'compression_enabled' ]
if 'dedup_enabled' in vsan_dict :
if not vsan_spec . dataEfficiencyConfig :
vsan_spec . dataEfficiencyConfig = vim . vsan . DataEfficiencyConfig ( )
vsan_spec . dataEfficiencyConfig . dedupEnabled = vsan_dict [ 'dedup_enabled' ]
# In all cases we need to configure the vsan on the cluster
# directly so as not to have a mismatch between vsan _ spec and
# cluster _ spec
if not cluster_spec . vsanConfig :
cluster_spec . vsanConfig = vim . VsanClusterConfigInfo ( )
vsan_config = cluster_spec . vsanConfig
if 'enabled' in vsan_dict :
vsan_config . enabled = vsan_dict [ 'enabled' ]
if 'auto_claim_storage' in vsan_dict :
if not vsan_config . defaultConfig :
vsan_config . defaultConfig = vim . VsanClusterConfigInfoHostDefaultInfo ( )
elif vsan_config . defaultConfig . uuid : # If this remains set it causes an error
vsan_config . defaultConfig . uuid = None
vsan_config . defaultConfig . autoClaimStorage = vsan_dict [ 'auto_claim_storage' ]
log . trace ( 'cluster_spec = %s' , cluster_spec )
|
def perl_cmd ( ) :
"""Retrieve path to locally installed conda Perl or first in PATH ."""
|
perl = which ( os . path . join ( get_bcbio_bin ( ) , "perl" ) )
if perl :
return perl
else :
return which ( "perl" )
|
def validate_rpc_host ( ip ) :
"""Validates the given ip for use as RPC server address ."""
|
if not is_valid_ipv4 ( ip ) and not is_valid_ipv6 ( ip ) :
raise ApplicationException ( desc = 'Invalid RPC ip address: %s' % ip )
return ip
|
def write ( self , data ) :
"""Write string data to current process input stream ."""
|
data = data . encode ( 'utf-8' )
data_p = ctypes . create_string_buffer ( data )
num_bytes = PLARGE_INTEGER ( LARGE_INTEGER ( 0 ) )
bytes_to_write = len ( data )
success = WriteFile ( self . conin_pipe , data_p , bytes_to_write , num_bytes , None )
return success , num_bytes [ 0 ]
|
def inv ( self ) :
"""The inverse translation"""
|
result = Translation ( - self . t )
result . _cache_inv = self
return result
|
def _flatten_samples ( samples , base_file , get_retriever ) :
"""Create a flattened JSON representation of data from the bcbio world map ."""
|
flat_data = [ ]
for data in samples :
data [ "reference" ] = _indexes_to_secondary_files ( data [ "reference" ] , data [ "genome_build" ] )
cur_flat = { }
for key_path in [ [ "analysis" ] , [ "description" ] , [ "rgnames" ] , [ "config" , "algorithm" ] , [ "metadata" ] , [ "genome_build" ] , [ "resources" ] , [ "files" ] , [ "reference" ] , [ "genome_resources" ] , [ "vrn_file" ] ] :
cur_key = "__" . join ( key_path )
for flat_key , flat_val in _to_cwldata ( cur_key , tz . get_in ( key_path , data ) , get_retriever ) :
cur_flat [ flat_key ] = flat_val
flat_data . append ( cur_flat )
out = { }
for key in sorted ( list ( set ( reduce ( operator . add , [ list ( d . keys ( ) ) for d in flat_data ] ) ) ) ) : # Periods in keys cause issues with WDL and some CWL implementations
clean_key = key . replace ( "." , "_" )
out [ clean_key ] = [ ]
for cur_flat in flat_data :
out [ clean_key ] . append ( cur_flat . get ( key ) )
# special case for back - compatibility with fasta specifications - - yuck
if "reference__fasta__base" not in out and "reference__fasta" in out :
out [ "reference__fasta__base" ] = out [ "reference__fasta" ]
del out [ "reference__fasta" ]
return _samplejson_to_inputs ( out ) , out
|
def preserve_cwd ( func : Callable ) -> Callable :
"""Decorator to preserve the current working directory in calls to the
decorated function .
Example :
. . code - block : : python
@ preserve _ cwd
def myfunc ( ) :
os . chdir ( " / faraway " )
os . chdir ( " / home " )
myfunc ( )
assert os . getcwd ( ) = = " / home " """
|
# http : / / stackoverflow . com / questions / 169070 / python - how - do - i - write - a - decorator - that - restores - the - cwd # noqa
def decorator ( * args_ , ** kwargs ) -> Any :
cwd = os . getcwd ( )
try :
return func ( * args_ , ** kwargs )
finally : # restore the working directory even if func raises
os . chdir ( cwd )
return decorator
|
def realtime_learning_curves ( runs ) :
"""example how to extract a different kind of learning curve .
The x values are now the time the runs finished , not the budget anymore .
We no longer plot the validation loss on the y axis , but now the test accuracy .
This is just to show how to get different information into the interactive plot ."""
|
sr = sorted ( runs , key = lambda r : r . budget )
lc = list ( filter ( lambda t : t [ 1 ] is not None , [ ( r . time_stamps [ 'finished' ] , r . info [ 'test accuracy' ] ) for r in sr ] ) )
return ( [ lc , ] )
|
def configure ( self , transport , auth , address , port ) :
"""Connect paramiko transport
: type auth : : py : class ` margaritashotgun . auth . AuthMethods `
: param auth : authentication object
: type address : str
: param address : remote server ip or hostname
: type port : int
: param port : remote server port
: type hostkey : : py : class : ` paramiko . key . HostKey `
: param hostkey : remote host ssh server key"""
|
self . transport = transport
self . username = auth . username
self . address = address
self . port = port
|
def activities ( self , limit = 1 , event = None ) :
"""Return device activity information ."""
|
activities = self . _activities or [ ]
# Filter our activity array if requested
if event :
activities = list ( filter ( lambda activity : activity [ CONST . EVENT ] == event , activities ) )
# Return the requested number
return activities [ : limit ]
|
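A small usage sketch; the device object and the 'motion' event name are placeholders:

recent_motion = device.activities(limit=5, event='motion')
for activity in recent_motion:
    print(activity)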
def update ( self , key : str , data : np . ndarray ) -> None :
"""Update entry in datastore"""
|
self . data [ key ] = data
|
def get_random_ontology ( TOP_RANGE = 10 , pattern = "" ) :
"""for testing purposes . Returns a random ontology / graph"""
|
choices = get_localontologies ( pattern = pattern )
try :
ontouri = choices [ random . randint ( 0 , TOP_RANGE ) ]
except IndexError :
ontouri = choices [ 0 ]
print ( "Testing with URI: %s" % ontouri )
g = get_pickled_ontology ( ontouri )
if not g :
g = do_pickle_ontology ( ontouri )
return ontouri , g
|
def encode ( self , s ) :
"""Encode special characters found in string I { s } .
@ param s : A string to encode .
@ type s : str
@ return : The encoded string .
@ rtype : str"""
|
if isinstance ( s , str ) and self . needsEncoding ( s ) :
for x in self . encodings :
s = re . sub ( x [ 0 ] , x [ 1 ] , s )
return s
|
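A usage sketch, assuming the default encodings table escapes the XML special characters (&, <, >, quotes); Encoder stands in for the class defining encode above:

enc = Encoder()
enc.encode('a < b & "c"')  # -> 'a < b & "c"' under that assumption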