def _determine_timeinterval ( self ) :
"""Return a dictionary with two datetime objects , start _ time and end _ time ,
covering the interval of the training job"""
|
description = self . _sage_client . describe_training_job ( TrainingJobName = self . name )
start_time = self . _start_time or description [ u'TrainingStartTime' ]
# datetime object
# Incrementing end time by 1 min since CloudWatch drops seconds before finding the logs .
# This results in logs being searched in the time range in which the correct log line was not present .
# Example - Log time - 2018-10-22 08:25:55
# Here calculated end time would also be 2018-10-22 08:25:55 ( without 1 min addition )
# CW will consider end time as 2018-10-22 08:25 and will not be able to search the correct log .
end_time = self . _end_time or description . get ( u'TrainingEndTime' , datetime . datetime . utcnow ( ) ) + datetime . timedelta ( minutes = 1 )
return { 'start_time' : start_time , 'end_time' : end_time , }
|
def clear_waiting_coordinators ( self , cancel = False ) :
'''Remove all entries from the waiting queue, or cancel everything in the waiting queue.'''
|
with self . _lockw :
if cancel :
for _coordinator in self . _waiting_transfer_coordinators :
_coordinator . notify_cancelled ( "Clear Waiting Queue" , False )
self . _waiting_transfer_coordinators . clear ( )
|
def cd ( * args ) :
"""An argument of - is equivalent to $ OLDPWD . If - is the first argument , and
the directory change is successful , the absolute pathname of the new
working directory is written to the standard output ."""
|
if args [ 0 ] == "-" :
try :
newpwd , os . environ [ "OLDPWD" ] = os . environ [ "OLDPWD" ] , os . getcwd ( )
except KeyError as e : # $ OLDPWD initially not set
raise e
else :
os . chdir ( newpwd )
print ( newpwd )
else :
os . environ [ "OLDPWD" ] = os . getcwd ( )
os . chdir ( * args )
|
def _sanitize_column_names ( data ) :
"""Replace illegal characters with underscore"""
|
new_names = { }
for name in data . columns :
new_names [ name ] = _ILLEGAL_CHARACTER_PAT . sub ( "_" , name )
return new_names
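A self-contained sketch of the same idea; `_ILLEGAL_CHARACTER_PAT` is not shown above, so the pattern used here (anything outside letters, digits, and underscore) is only an assumption for illustration.

import re

# Hypothetical stand-in for the module-level _ILLEGAL_CHARACTER_PAT used above.
_ILLEGAL_CHARACTER_PAT = re.compile(r"[^0-9A-Za-z_]")

def sanitize_column_names(columns):
    """Map each column name to a copy with illegal characters replaced by '_'."""
    return {name: _ILLEGAL_CHARACTER_PAT.sub("_", name) for name in columns}

print(sanitize_column_names(["price ($)", "avg temp"]))  # {'price ($)': 'price____', 'avg temp': 'avg_temp'}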
|
def get_job_output ( self , job_id ) :
"""GetJobOutput
https : / / apidocs . joyent . com / manta / api . html # GetJobOutput"""
|
log . debug ( "GetJobOutput %r" , job_id )
path = "/%s/jobs/%s/live/out" % ( self . account , job_id )
res , content = self . _request ( path , "GET" )
if res [ "status" ] != "200" :
raise errors . MantaAPIError ( res , content )
keys = content . splitlines ( False )
return keys
|
def inverse_cdf ( su , W ) :
"""Inverse CDF algorithm for a finite distribution .
Parameters
su : ( M , ) ndarray
M sorted uniform variates ( i . e . M ordered points in [ 0,1 ] ) .
W : ( N , ) ndarray
a vector of N normalized weights ( > = 0 and sum to one )
Returns
A : ( M , ) ndarray
a vector of M indices in range 0 , . . . , N - 1"""
|
j = 0
s = W [ 0 ]
M = su . shape [ 0 ]
A = np . empty ( M , 'int' )
for n in range ( M ) :
while su [ n ] > s :
j += 1
s += W [ j ]
A [ n ] = j
return A
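A short usage sketch, assuming NumPy; the stratified way of generating the sorted uniforms below is an illustration choice, not part of the function above.

import numpy as np

W = np.array([0.2, 0.5, 0.3])                # N = 3 normalized weights
M = 5
su = (np.arange(M) + np.random.rand(M)) / M  # M sorted points in [0, 1]
A = inverse_cdf(su, W)                       # M indices in 0..N-1, e.g. array([0, 1, 1, 2, 2])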
|
def _append_date ( self , value , _file ) :
"""Call this function to write date contents .
Keyword arguments :
* value - dict , content to be dumped
* _ file - FileIO , output file"""
|
_tabs = '\t' * self . _tctr
_text = value . strftime ( '%Y-%m-%dT%H:%M:%S.%fZ' )
_labs = '{tabs}<date>{text}</date>\n' . format ( tabs = _tabs , text = _text )
_file . write ( _labs )
|
def banlist ( self , channel ) :
"""Get the channel banlist .
Required arguments :
* channel - Channel to get the banlist for."""
|
with self . lock :
self . is_in_channel ( channel )
self . send ( 'MODE %s b' % channel )
bans = [ ]
while self . readable ( ) :
msg = self . _recv ( expected_replies = ( '367' , '368' ) )
if msg [ 0 ] == '367' :
banmask , who , timestamp = msg [ 2 ] . split ( ) [ 1 : ]
bans . append ( ( self . _from_ ( banmask ) , who , self . _m_time . localtime ( int ( timestamp ) ) ) )
elif msg [ 0 ] == '368' :
break
return bans
|
def portfolio_value ( portfolio , date , price = 'close' ) :
"""Total value of a portfolio ( dict mapping symbols to numbers of shares )
$ CASH used as symbol for USD"""
|
value = 0.0
for ( sym , sym_shares ) in portfolio . iteritems ( ) :
sym_price = None
if sym_shares :
sym_price = get_price ( symbol = sym , date = date , price = price )
# print sym , sym _ shares , sym _ price
# print last _ date , k , price
if sym_price != None :
if np . isnan ( sym_price ) :
print 'Invalid price, shares, value, total: ' , sym_price , sym_shares , ( float ( sym_shares ) * float ( sym_price ) ) if sym_shares and sym_price else 'Invalid' , value
if sym_shares :
return float ( 'nan' )
else : # print ( ' { 0 } shares of { 1 } = { 2 } * { 3 } = { 4 } ' . format ( sym _ shares , sym , sym _ shares , sym _ price , sym _ shares * sym _ price ) )
value += sym_shares * sym_price
# print ' new price , value = { 0 } , { 1 } ' . format ( sym _ price , value )
return value
|
def runner_doc ( * args ) :
'''Return the docstrings for all runners. Optionally, specify a runner or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple runners/functions can be specified.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
    salt '*' sys.runner_doc
    salt '*' sys.runner_doc cache
    salt '*' sys.runner_doc cache.grains
    salt '*' sys.runner_doc cache.grains mine.get
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
    salt '*' sys.runner_doc 'cache.clear_*' '''
|
run_ = salt . runner . Runner ( __opts__ )
docs = { }
if not args :
for fun in run_ . functions :
docs [ fun ] = run_ . functions [ fun ] . __doc__
return _strip_rst ( docs )
for module in args :
_use_fnmatch = False
if '*' in module :
target_mod = module
_use_fnmatch = True
elif module : # allow both " sys " and " sys . " to match sys , without also matching
# sysctl
target_mod = module + '.' if not module . endswith ( '.' ) else module
else :
target_mod = ''
if _use_fnmatch :
for fun in fnmatch . filter ( run_ . functions , target_mod ) :
docs [ fun ] = run_ . functions [ fun ] . __doc__
else :
for fun in run_ . functions :
if fun == module or fun . startswith ( target_mod ) :
docs [ fun ] = run_ . functions [ fun ] . __doc__
return _strip_rst ( docs )
|
def dim_dtau ( self , pars ) :
r""": math : ` \ frac { \ partial \ hat { \ rho ' ' } ( \ omega ) } { \ partial \ tau } = \ rho _ 0
\ frac { - m \ omega ^ c c \ tau ^ { c - 1 } sin ( \ frac { c \ pi } { 2 } } { 1 + 2 ( \ omega
\ tau ) ^ c cos ( \ frac { c \ pi } { 2 } ) + ( \ omega \ tau ) ^ { 2 c } } +
\ rho _ 0 \ frac { \ left [ - m ( \ omega \ tau ) ^ c sin ( \ frac { c \ pi } { 2}
\ right ] \ cdot \ left [ 2 \ omega ^ c c \ tau ^ { c - 1 } cos ( \ frac { c
\ pi } { 2 } ) + 2 c \ omega ^ { 2 c } \ tau ^ { 2 c - 1 } \ right ] } { \ left [ 1 + 2 ( \ omega
\ tau ) ^ c cos ( \ frac { c \ pi } { 2 } ) + ( \ omega \ tau ) ^ { 2 c } \ right ] ^ 2 } `"""
|
self . _set_parameters ( pars )
# term1
nom1 = - self . m * np . sin ( self . ang ) * self . w ** self . c * self . c * self . tau ** ( self . c - 1 )
term1 = nom1 / self . denom
# term2
nom2 = ( self . m * self . otc * np . sin ( self . ang ) ) * ( 2 * self . w ** self . c * self . c * self . tau ** ( self . c - 1 ) * np . cos ( self . ang ) + 2 * self . c * self . w ** ( 2 * self . c ) * self . tau ** ( 2 * self . c - 1 ) )
term2 = nom2 / self . denom ** 2
result = term1 + term2
result *= self . rho0
return result
|
def joint ( node ) :
"""Merge the bodies of primal and adjoint into a single function .
Args :
node : A module with the primal and adjoint function definitions as returned
by ` reverse _ ad ` .
Returns :
func : A ` Module ` node with a single function definition containing the
combined primal and adjoint ."""
|
node , _ , _ = _fix ( node )
body = node . body [ 0 ] . body [ : - 1 ] + node . body [ 1 ] . body
func = gast . Module ( body = [ gast . FunctionDef ( name = node . body [ 0 ] . name , args = node . body [ 1 ] . args , body = body , decorator_list = [ ] , returns = None ) ] )
# Clean up
anno . clearanno ( func )
return func
|
async def _process_auth_form ( self , html : str ) -> ( str , str ) :
"""Parsing data from authorization page and filling the form and submitting the form
: param html : html page
: return : url and html from redirected page"""
|
# Parse page
p = AuthPageParser ( )
p . feed ( html )
p . close ( )
# Get data from hidden inputs
form_data = dict ( p . inputs )
form_url = p . url
form_data [ 'email' ] = self . login
form_data [ 'pass' ] = self . password
if p . message : # Show form errors
raise VkAuthError ( 'invalid_data' , p . message , form_url , form_data )
elif p . captcha_url :
form_data [ 'captcha_key' ] = await self . enter_captcha ( "https://m.vk.com{}" . format ( p . captcha_url ) , form_data [ 'captcha_sid' ] )
form_url = "https://m.vk.com{}" . format ( form_url )
# Send request
url , html = await self . driver . post_text ( form_url , form_data )
return url , html
|
def randomMails ( self , count = 1 ) :
"""Return random e - mails .
: rtype : list
: returns : list of random e - mails"""
|
self . check_count ( count )
random_nicks = self . rn . random_nicks ( count = count )
random_domains = sample ( self . dmails , count )
return [ nick . lower ( ) + "@" + domain for nick , domain in zip ( random_nicks , random_domains ) ]
|
def load_data ( self , df ) :
"""Wraps the LOAD DATA DDL statement . Loads data into an MapD table from
pandas . DataFrame or pyarrow . Table
Parameters
df : pandas . DataFrame or pyarrow . Table
Returns
query : MapDQuery"""
|
stmt = ddl . LoadData ( self . _qualified_name , df )
return self . _execute ( stmt )
|
def handle_job_and_work_save ( self , sender , instance , ** kwargs ) :
"""Custom handler for job and work save"""
|
self . handle_save ( instance . project . __class__ , instance . project )
|
def mknod ( name , ntype , major = 0 , minor = 0 , user = None , group = None , mode = '0600' ) :
'''Create a special file similar to the 'nix mknod command. The supported
device types are ``p`` (fifo pipe), ``c`` (character device), and ``b``
(block device). Provide the major and minor numbers when specifying a
character device or block device. A fifo pipe does not require this
information. The command will create the necessary dirs if needed. If a
file of the same name not of the same type/major/minor exists, it will not
be overwritten or unlinked (deleted). This is logically in place as a
safety measure because you can really shoot yourself in the foot here and
it is the behavior of 'nix ``mknod``. It is also important to note that not
just anyone can create special devices. Usually this is only done as root.
If the state is executed as none other than root on a minion, you may
receive a permission error.

name
    name of the file

ntype
    node type 'p' (fifo pipe), 'c' (character device), or 'b'
    (block device)

major
    major number of the device
    does not apply to a fifo pipe

minor
    minor number of the device
    does not apply to a fifo pipe

user
    owning user of the device/pipe

group
    owning group of the device/pipe

mode
    permissions on the device/pipe

Usage:

.. code-block:: yaml

    /dev/chr:
      file.mknod:
        - ntype: c
        - major: 180
        - minor: 31
        - user: root
        - group: root
        - mode: 660

    /dev/blk:
      file.mknod:
        - ntype: b
        - major: 8
        - minor: 999
        - user: root
        - group: root
        - mode: 660

    /dev/fifo:
      file.mknod:
        - ntype: p
        - user: root
        - group: root
        - mode: 660

.. versionadded:: 0.17.0'''
|
name = os . path . expanduser ( name )
ret = { 'name' : name , 'changes' : { } , 'comment' : '' , 'result' : False }
if not name :
return _error ( ret , 'Must provide name to file.mknod' )
if ntype == 'c' : # Check for file existence
if __salt__ [ 'file.file_exists' ] ( name ) :
ret [ 'comment' ] = ( 'File {0} exists and is not a character device. Refusing ' 'to continue' . format ( name ) )
# Check if it is a character device
elif not __salt__ [ 'file.is_chrdev' ] ( name ) :
if __opts__ [ 'test' ] :
ret [ 'comment' ] = 'Character device {0} is set to be created' . format ( name )
ret [ 'result' ] = None
else :
ret = __salt__ [ 'file.mknod' ] ( name , ntype , major , minor , user , group , mode )
# Check the major / minor
else :
devmaj , devmin = __salt__ [ 'file.get_devmm' ] ( name )
if ( major , minor ) != ( devmaj , devmin ) :
ret [ 'comment' ] = ( 'Character device {0} exists and has a different ' 'major/minor {1}/{2}. Refusing to continue' . format ( name , devmaj , devmin ) )
# Check the perms
else :
ret = __salt__ [ 'file.check_perms' ] ( name , None , user , group , mode ) [ 0 ]
if not ret [ 'changes' ] :
ret [ 'comment' ] = ( 'Character device {0} is in the correct state' . format ( name ) )
elif ntype == 'b' : # Check for file existence
if __salt__ [ 'file.file_exists' ] ( name ) :
ret [ 'comment' ] = ( 'File {0} exists and is not a block device. Refusing to ' 'continue' . format ( name ) )
# Check if it is a block device
elif not __salt__ [ 'file.is_blkdev' ] ( name ) :
if __opts__ [ 'test' ] :
ret [ 'comment' ] = 'Block device {0} is set to be created' . format ( name )
ret [ 'result' ] = None
else :
ret = __salt__ [ 'file.mknod' ] ( name , ntype , major , minor , user , group , mode )
# Check the major / minor
else :
devmaj , devmin = __salt__ [ 'file.get_devmm' ] ( name )
if ( major , minor ) != ( devmaj , devmin ) :
ret [ 'comment' ] = ( 'Block device {0} exists and has a different major/minor ' '{1}/{2}. Refusing to continue' . format ( name , devmaj , devmin ) )
# Check the perms
else :
ret = __salt__ [ 'file.check_perms' ] ( name , None , user , group , mode ) [ 0 ]
if not ret [ 'changes' ] :
ret [ 'comment' ] = ( 'Block device {0} is in the correct state' . format ( name ) )
elif ntype == 'p' : # Check for file existence
if __salt__ [ 'file.file_exists' ] ( name ) :
ret [ 'comment' ] = ( 'File {0} exists and is not a fifo pipe. Refusing to ' 'continue' . format ( name ) )
# Check if it is a fifo
elif not __salt__ [ 'file.is_fifo' ] ( name ) :
if __opts__ [ 'test' ] :
ret [ 'comment' ] = 'Fifo pipe {0} is set to be created' . format ( name )
ret [ 'result' ] = None
else :
ret = __salt__ [ 'file.mknod' ] ( name , ntype , major , minor , user , group , mode )
# Check the perms
else :
ret = __salt__ [ 'file.check_perms' ] ( name , None , user , group , mode ) [ 0 ]
if not ret [ 'changes' ] :
ret [ 'comment' ] = ( 'Fifo pipe {0} is in the correct state' . format ( name ) )
else :
ret [ 'comment' ] = ( 'Node type unavailable: \'{0}\'. Available node types are ' 'character (\'c\'), block (\'b\'), and pipe (\'p\')' . format ( ntype ) )
return ret
|
def get_alignment_data ( self , section ) :
"""Get the alignment SAM and Fasta , if present .
: param section : Can be ' template ' , ' complement ' , or ' 2d ' .
: return : A tuple containing the SAM and the section of the reference
aligned to ( both as strings ) . Returns None if no alignment is
present for that section ."""
|
subgroup = '{}/Aligned_{}' . format ( self . group_name , section )
sam = self . handle . get_analysis_dataset ( subgroup , 'SAM' )
fasta = self . handle . get_analysis_dataset ( subgroup , 'Fasta' )
if sam is None or fasta is None :
return None
sequence = fasta . split ( '\n' ) [ 1 ]
return sam , sequence
|
def get_font_matrix ( self ) :
"""Copies the scaled font ’ s font matrix .
: returns : A new : class : ` Matrix ` object ."""
|
matrix = Matrix ( )
cairo . cairo_scaled_font_get_font_matrix ( self . _pointer , matrix . _pointer )
self . _check_status ( )
return matrix
|
def read ( self , addr , size ) :
'''Parameters
addr : int
    The register address.
size : int
    Length of data to be read (number of bytes).
Returns
array : array
    Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.'''
|
logger . debug ( "Dummy SiTransferLayer.read addr: %s size: %s" % ( hex ( addr ) , size ) )
return array . array ( 'B' , [ self . mem [ curr_addr ] if curr_addr in self . mem else 0 for curr_addr in range ( addr , addr + size ) ] )
|
def RegisterSourceType ( cls , source_type_class ) :
"""Registers a source type .
Source types are identified based on their type indicator .
Args :
source _ type _ class ( type ) : source type .
Raises :
KeyError : if source types is already set for the corresponding
type indicator ."""
|
if source_type_class . TYPE_INDICATOR in cls . _source_type_classes :
raise KeyError ( 'Source type already set for type: {0:s}.' . format ( source_type_class . TYPE_INDICATOR ) )
cls . _source_type_classes [ source_type_class . TYPE_INDICATOR ] = ( source_type_class )
|
def do_copy ( self , subcmd , opts , * args ) :
"""Duplicate something in working copy or repository , remembering history .
usage :
copy SRC DST
SRC and DST can each be either a working copy ( WC ) path or URL :
WC - > WC : copy and schedule for addition ( with history )
WC - > URL : immediately commit a copy of WC to URL
URL - > WC : check out URL into WC , schedule for addition
URL - > URL : complete server - side copy ; used to branch & tag
$ { cmd _ option _ list }"""
|
print "'svn %s' opts: %s" % ( subcmd , opts )
print "'svn %s' args: %s" % ( subcmd , args )
|
def scale ( self , scaled_cx , scaled_cy ) :
"""Return scaled image dimensions in EMU based on the combination of
parameters supplied . If * scaled _ cx * and * scaled _ cy * are both | None | ,
the native image size is returned . If neither * scaled _ cx * nor
* scaled _ cy * is | None | , their values are returned unchanged . If
a value is provided for either * scaled _ cx * or * scaled _ cy * and the
other is | None | , the missing value is calculated such that the
image ' s aspect ratio is preserved ."""
|
image_cx , image_cy = self . _native_size
if scaled_cx is None and scaled_cy is None :
scaled_cx = image_cx
scaled_cy = image_cy
elif scaled_cx is None :
scaling_factor = float ( scaled_cy ) / float ( image_cy )
scaled_cx = int ( round ( image_cx * scaling_factor ) )
elif scaled_cy is None :
scaling_factor = float ( scaled_cx ) / float ( image_cx )
scaled_cy = int ( round ( image_cy * scaling_factor ) )
return scaled_cx , scaled_cy
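A standalone sketch of the same scaling rule with plain integers; the class, its `_native_size` property, and the EMU types are not reproduced here.

def scale_to_native(native_cx, native_cy, scaled_cx=None, scaled_cy=None):
    """Fill in a missing dimension so the image's aspect ratio is preserved."""
    if scaled_cx is None and scaled_cy is None:
        return native_cx, native_cy
    if scaled_cx is None:
        factor = float(scaled_cy) / float(native_cy)
        return int(round(native_cx * factor)), scaled_cy
    if scaled_cy is None:
        factor = float(scaled_cx) / float(native_cx)
        return scaled_cx, int(round(native_cy * factor))
    return scaled_cx, scaled_cy

print(scale_to_native(400, 300, scaled_cx=200))  # (200, 150)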
|
def serialize_model ( model ) :
"""Serialize the HTK model into a file .
: param model : Model to be serialized"""
|
result = ''
# First serialize the macros
for macro in model [ 'macros' ] :
if macro . get ( 'options' , None ) :
result += '~o '
for option in macro [ 'options' ] [ 'definition' ] :
result += _serialize_option ( option )
elif macro . get ( 'transition' , None ) :
result += '~t "{}"\n' . format ( macro [ 'transition' ] [ 'name' ] )
result += _serialize_transp ( macro [ 'transition' ] [ 'definition' ] )
elif macro . get ( 'variance' , None ) :
result += '~v "{}"\n' . format ( macro [ 'variance' ] [ 'name' ] )
result += _serialize_variance ( macro [ 'variance' ] [ 'definition' ] )
elif macro . get ( 'state' , None ) :
result += '~s "{}"\n' . format ( macro [ 'state' ] [ 'name' ] )
result += _serialize_stateinfo ( macro [ 'state' ] [ 'definition' ] )
elif macro . get ( 'mean' , None ) :
result += '~u "{}"\n' . format ( macro [ 'mean' ] [ 'name' ] )
result += _serialize_mean ( macro [ 'mean' ] [ 'definition' ] )
elif macro . get ( 'duration' , None ) :
result += '~d "{}"\n' . format ( macro [ 'duration' ] [ 'name' ] )
result += _serialize_duration ( macro [ 'duration' ] [ 'definition' ] )
else :
raise NotImplementedError ( 'Cannot serialize {}' . format ( macro ) )
for hmm in model [ 'hmms' ] :
if hmm . get ( 'name' , None ) is not None :
result += '~h "{}"\n' . format ( hmm [ 'name' ] )
result += _serialize_hmm ( hmm [ 'definition' ] )
return result
|
def project_meta ( self , attributes ) :
"""Projects the specified metadata attributes to new region fields
: param attributes : a list of metadata attributes
: return : a new GDataframe with additional region fields"""
|
if not isinstance ( attributes , list ) :
raise TypeError ( 'attributes must be a list' )
meta_to_project = self . meta [ attributes ] . applymap ( lambda l : ", " . join ( l ) )
new_regs = self . regs . merge ( meta_to_project , left_index = True , right_index = True )
return GDataframe ( regs = new_regs , meta = self . meta )
|
def delete_one_word ( self , word = RIGHT ) :
"""Delete one word the right or the the left of the cursor ."""
|
assert word in ( self . RIGHT , self . LEFT )
if word == self . RIGHT :
papy = self . text . find ( ' ' , self . cursor ) + 1
if not papy :
papy = len ( self . text )
self . text = self . text [ : self . cursor ] + self . text [ papy : ]
else :
papy = self . text . rfind ( ' ' , 0 , self . cursor )
if papy == - 1 :
papy = 0
self . text = self . text [ : papy ] + self . text [ self . cursor : ]
self . cursor = papy
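A minimal standalone version of the right-hand deletion branch, for illustration only; the widget's `self.text`/`self.cursor` state is replaced by plain arguments.

def delete_word_right(text, cursor):
    """Delete the word to the right of the cursor and return the new text."""
    cut = text.find(' ', cursor) + 1
    if not cut:                 # find() returned -1: no space to the right
        cut = len(text)
    return text[:cursor] + text[cut:]

print(delete_word_right("hello brave world", 6))  # 'hello world'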
|
def haplotype_caller ( align_bams , items , ref_file , assoc_files , region = None , out_file = None ) :
"""Call variation with GATK ' s HaplotypeCaller .
This requires the full non open - source version of GATK ."""
|
if out_file is None :
out_file = "%s-variants.vcf.gz" % utils . splitext_plus ( align_bams [ 0 ] ) [ 0 ]
if not utils . file_exists ( out_file ) :
num_cores = dd . get_num_cores ( items [ 0 ] )
broad_runner , params = _shared_gatk_call_prep ( align_bams , items , ref_file , region , out_file , num_cores )
gatk_type = broad_runner . gatk_type ( )
assert gatk_type in [ "restricted" , "gatk4" ] , "Require full version of GATK 2.4+, or GATK4 for haplotype calling"
with file_transaction ( items [ 0 ] , out_file ) as tx_out_file :
resources = config_utils . get_resources ( "gatk-spark" , items [ 0 ] [ "config" ] )
spark_opts = [ str ( x ) for x in resources . get ( "options" , [ ] ) ]
if _use_spark ( num_cores , gatk_type , items , spark_opts ) :
params += [ "-T" , "HaplotypeCallerSpark" ]
if spark_opts :
params += spark_opts
else :
params += [ "--spark-master" , "local[%s]" % num_cores , "--conf" , "spark.local.dir=%s" % os . path . dirname ( tx_out_file ) , "--conf" , "spark.driver.host=localhost" , "--conf" , "spark.network.timeout=800" , "--conf" , "spark.executor.heartbeatInterval=100" ]
else :
params += [ "-T" , "HaplotypeCaller" ]
params += [ "--annotation" , "ClippingRankSumTest" , "--annotation" , "DepthPerSampleHC" ]
# Enable hardware based optimizations in GATK 3.1 +
if LooseVersion ( broad_runner . gatk_major_version ( ) ) >= LooseVersion ( "3.1" ) :
if _supports_avx ( ) : # Scale down HMM thread default to avoid overuse of cores
# https : / / github . com / bcbio / bcbio - nextgen / issues / 2442
if gatk_type == "gatk4" :
params += [ "--native-pair-hmm-threads" , "1" ]
# GATK4 selects the right HMM optimization automatically with FASTEST _ AVAILABLE
# GATK3 needs to be explicitly set
else :
params += [ "--pair_hmm_implementation" , "VECTOR_LOGLESS_CACHING" ]
resources = config_utils . get_resources ( "gatk-haplotype" , items [ 0 ] [ "config" ] )
if "options" in resources :
params += [ str ( x ) for x in resources . get ( "options" , [ ] ) ]
# Prepare gVCFs if doing joint calling
is_joint = False
if _joint_calling ( items ) or any ( "gvcf" in dd . get_tools_on ( d ) for d in items ) :
is_joint = True
# If joint calling parameters not set in user options
if not any ( [ x in [ "--emit-ref-confidence" , "-ERC" , "--emitRefConfidence" ] for x in params ] ) :
if gatk_type == "gatk4" :
params += [ "--emit-ref-confidence" , "GVCF" ]
else :
params += [ "--emitRefConfidence" , "GVCF" ]
params += [ "--variant_index_type" , "LINEAR" , "--variant_index_parameter" , "128000" ]
# Set GQ banding to not be single GQ resolution
# No recommended default but try to balance resolution and size
# http : / / gatkforums . broadinstitute . org / gatk / discussion / 7051 / recommendation - best - practices - gvcf - gq - bands
if not any ( [ x in [ "-GQB" ] for x in params ] ) :
for boundary in [ 10 , 20 , 30 , 40 , 60 , 80 ] :
params += [ "-GQB" , str ( boundary ) ]
# Enable non - diploid calling in GATK 3.3 +
if LooseVersion ( broad_runner . gatk_major_version ( ) ) >= LooseVersion ( "3.3" ) :
params += [ "-ploidy" , str ( ploidy . get_ploidy ( items , region ) ) ]
if gatk_type == "gatk4" : # GATK4 Spark calling does not support bgzipped output , use plain VCFs
if is_joint and _use_spark ( num_cores , gatk_type , items , spark_opts ) :
tx_out_file = tx_out_file . replace ( ".vcf.gz" , ".vcf" )
params += [ "--output" , tx_out_file ]
else :
params += [ "-o" , tx_out_file ]
broad_runner . new_resources ( "gatk-haplotype" )
memscale = { "magnitude" : 0.9 * num_cores , "direction" : "increase" } if num_cores > 1 else None
try :
broad_runner . run_gatk ( params , os . path . dirname ( tx_out_file ) , memscale = memscale , parallel_gc = _use_spark ( num_cores , gatk_type , items , spark_opts ) )
except subprocess . CalledProcessError as msg : # Spark failing on regions without any reads , write an empty VCF instead
# https : / / github . com / broadinstitute / gatk / issues / 4234
if ( _use_spark ( num_cores , gatk_type , items , spark_opts ) and str ( msg ) . find ( "java.lang.UnsupportedOperationException: empty collection" ) >= 0 and str ( msg ) . find ( "at org.apache.spark.rdd.RDD" ) >= 0 ) :
vcfutils . write_empty_vcf ( tx_out_file , samples = [ dd . get_sample_name ( d ) for d in items ] )
else :
raise
if tx_out_file . endswith ( ".vcf" ) :
vcfutils . bgzip_and_index ( tx_out_file , items [ 0 ] [ "config" ] )
# avoid bug in GATK where files can get output as non - compressed
if out_file . endswith ( ".gz" ) and not os . path . exists ( out_file + ".tbi" ) :
with open ( out_file , "r" ) as in_handle :
is_plain_text = in_handle . readline ( ) . startswith ( "##fileformat" )
if is_plain_text :
text_out_file = out_file
out_file = out_file . replace ( ".vcf.gz" , ".vcf" )
shutil . move ( text_out_file , out_file )
return vcfutils . bgzip_and_index ( out_file , items [ 0 ] [ "config" ] )
|
def _get_path ( dataset_name ) :
"""Returns path to where checksums are stored for a given dataset ."""
|
path = _checksum_paths ( ) . get ( dataset_name , None )
if path :
return path
msg = ( 'No checksums file could be found for dataset %s. Please create one in ' 'one of: %s' ) % ( dataset_name , ', ' . join ( _CHECKSUM_DIRS ) )
raise AssertionError ( msg )
|
def filter_records ( root , head , update , filters = ( ) ) :
"""Apply the filters to the records ."""
|
root , head , update = freeze ( root ) , freeze ( head ) , freeze ( update )
for filter_ in filters :
root , head , update = filter_ ( root , head , update )
return thaw ( root ) , thaw ( head ) , thaw ( update )
|
def remove_comments ( code ) :
"""Remove C - style comment from GLSL code string ."""
|
pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)"
# first group captures quoted strings ( double or single )
# second group captures comments ( / / single - line or / * multi - line * / )
regex = re . compile ( pattern , re . MULTILINE | re . DOTALL )
def do_replace ( match ) : # if the 2nd group ( capturing comments ) is not None ,
# it means we have captured a non - quoted ( real ) comment string .
if match . group ( 2 ) is not None :
return ""
# so we will return empty to remove the comment
else : # otherwise , we will return the 1st group
return match . group ( 1 )
# captured quoted - string
return regex . sub ( do_replace , code )
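A quick usage sketch on an invented GLSL snippet; both comment styles are removed while quoted text would be preserved by the first capture group.

shader = (
    '// header comment\n'
    'uniform float u_time; /* block\n'
    'comment */\n'
    'varying vec2 v_uv; // trailing comment\n'
)
print(remove_comments(shader))  # both comment styles are stripped; the declarations remain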
|
def signature ( self , cmd ) :
'''Convenience function that returns dict of function signature(s) specified by cmd.
cmd is dict of the form:
    'module': 'modulestring',
    'tgt': 'targetpatternstring',
    'tgt_type': 'targetpatterntype',
    'token': 'salttokenstring',
    'username': 'usernamestring',
    'password': 'passwordstring',
    'eauth': 'eauthtypestring',
The cmd dict items are as follows:
module: required. This is either a module or module function name for
    the specified client.
tgt: Optional pattern string specifying the targeted minions when client
    is 'minion'
tgt_type: Optional target pattern type string when client is 'minion'.
    Example: 'glob'. Defaults to 'glob' if missing.
token: the salt token. Either token: is required or the set of username:,
    password:, and eauth:
username: the salt username. Required if token is missing.
password: the user's password. Required if token is missing.
eauth: the authentication type such as 'pam' or 'ldap'. Required if token is missing.
Adds client per the command.'''
|
cmd [ 'client' ] = 'minion'
if len ( cmd [ 'module' ] . split ( '.' ) ) > 2 and cmd [ 'module' ] . split ( '.' ) [ 0 ] in [ 'runner' , 'wheel' ] :
cmd [ 'client' ] = 'master'
return self . _signature ( cmd )
|
def logs ( self , num = None , source = None , ps = None , tail = False ) :
"""Returns the requested log ."""
|
# Bootstrap payload package .
payload = { 'logplex' : 'true' }
if num :
payload [ 'num' ] = num
if source :
payload [ 'source' ] = source
if ps :
payload [ 'ps' ] = ps
if tail :
payload [ 'tail' ] = 1
# Grab the URL of the logplex endpoint .
r = self . _h . _http_resource ( method = 'GET' , resource = ( 'apps' , self . name , 'logs' ) , data = payload )
# Grab the actual logs .
r = requests . get ( r . content . decode ( "utf-8" ) , verify = False , stream = True )
if not tail :
return r . content
else : # Return line iterator for tail !
return r . iter_lines ( )
|
def find ( whatever = None , language = None , iso639_1 = None , iso639_2 = None , native = None ) :
"""Find data row with the language .
: param whatever : key to search in any of the following fields
: param language : key to search in English language name
: param iso639_1 : key to search in ISO 639-1 code ( 2 digits )
: param iso639_2 : key to search in ISO 639-2 code ( 3 digits ,
bibliographic & terminological )
: param native : key to search in native language name
: return : a dict with keys ( u ' name ' , u ' iso639_1 ' , u ' iso639_2 _ b ' ,
u ' iso639_2 _ t ' , u ' native ' )
All arguments can be both string or unicode ( Python 2 ) .
If there are multiple names defined , any of these can be looked for ."""
|
if whatever :
keys = [ u'name' , u'iso639_1' , u'iso639_2_b' , u'iso639_2_t' , u'native' ]
val = whatever
elif language :
keys = [ u'name' ]
val = language
elif iso639_1 :
keys = [ u'iso639_1' ]
val = iso639_1
elif iso639_2 :
keys = [ u'iso639_2_b' , u'iso639_2_t' ]
val = iso639_2
elif native :
keys = [ u'native' ]
val = native
else :
raise ValueError ( 'Invalid search criteria.' )
val = unicode ( val ) . lower ( )
return next ( ( item for item in data if any ( val in item [ key ] . lower ( ) . split ( "; " ) for key in keys ) ) , None )
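A couple of hedged usage examples, assuming the module-level `data` table contains standard ISO 639 rows (its exact contents are not shown above).

row = find(whatever='fr')        # matches name, codes, or native name
row = find(iso639_2='fra')       # matches bibliographic or terminological code
if row is not None:
    print(row[u'name'], row[u'iso639_1'], row[u'native'])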
|
def _get_activation ( self , F , inputs , activation , ** kwargs ) :
"""Get activation function . Convert if is string"""
|
func = { 'tanh' : F . tanh , 'relu' : F . relu , 'sigmoid' : F . sigmoid , 'softsign' : F . softsign } . get ( activation )
if func :
return func ( inputs , ** kwargs )
elif isinstance ( activation , string_types ) :
return F . Activation ( inputs , act_type = activation , ** kwargs )
elif isinstance ( activation , LeakyReLU ) :
return F . LeakyReLU ( inputs , act_type = 'leaky' , slope = activation . _alpha , ** kwargs )
return activation ( inputs , ** kwargs )
|
def get ( self , url , ** opt ) :
'''Convert the resource url to a complete url and then fetch the
data from it.
Args:
    url: The url of an OEmbed resource.
    **opt: Parameters passed to the url.
Returns:
    OEmbedResponse object according to data fetched'''
|
return self . fetch ( self . request ( url , ** opt ) )
|
def tlog_inv ( y , th = 1 , r = _display_max , d = _l_mmax ) :
"""Inverse truncated log10 transform .
Values
Parameters
y : num | num iterable
values to be transformed .
th : num
Inverse values below th are transormed to th .
Must be > positive .
r : num ( default = 10 * * 4)
maximal transformed value .
d : num ( default = log10(2 * * 18 ) )
log10 of maximal possible measured value .
tlog _ inv ( r ) = 10 * * d
Returns
Array of transformed values ."""
|
if th <= 0 :
raise ValueError ( 'Threshold value must be positive. %s given.' % th )
x = 10 ** ( y * 1. * d / r )
try :
x [ x < th ] = th
except TypeError :
if x < th :
x = th
return x
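A small usage sketch with explicit r and d; the module-level defaults `_display_max` and `_l_mmax` are not shown above, so equivalent values are spelled out here.

import numpy as np

y = np.array([0.0, 2000.0, 4000.0])
x = tlog_inv(y, th=1, r=4000, d=np.log10(2 ** 18))
# 0 maps to 1 (clipped up to th), 2000 to 2**9 = 512, 4000 to 2**18 = 262144
print(x)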
|
def scrypt_mcf_check ( mcf , password ) :
"""Returns True if the password matches the given MCF hash"""
|
if isinstance ( password , unicode ) :
password = password . encode ( 'utf8' )
elif not isinstance ( password , bytes ) :
raise TypeError ( 'password must be a unicode or byte string' )
if not isinstance ( mcf , bytes ) :
raise TypeError ( 'MCF must be a byte string' )
if mcf_mod . _scrypt_mcf_7_is_standard ( mcf ) and not _scrypt_ll :
return _scrypt_str_chk ( mcf , password , len ( password ) ) == 0
return mcf_mod . scrypt_mcf_check ( scrypt , mcf , password )
|
def assignment_propagation ( node ) :
"""Perform assignment propagation .
Assignment propagation is not a compiler optimization as much as a
readability optimization . If a variable name is used only once , it gets
renamed when possible e . g . ` y = x ; z = y ` will become ` z = x ` .
Args :
node : The AST to optimize .
Returns :
The optimized AST ."""
|
n_reads = read_counts ( node )
to_remove = [ ]
for succ in gast . walk ( node ) : # We found an assignment of the form a = b
# - Left - hand side is a Name , right - hand side is a Name .
if ( isinstance ( succ , gast . Assign ) and isinstance ( succ . value , gast . Name ) and len ( succ . targets ) == 1 and isinstance ( succ . targets [ 0 ] , gast . Name ) ) :
rhs_name = succ . value . id
# We now find all the places that b was defined
rhs_defs = [ def_ [ 1 ] for def_ in anno . getanno ( succ , 'definitions_in' ) if def_ [ 0 ] == rhs_name ]
# If b was defined in only one place ( not an argument ) , and wasn ' t used
# anywhere else but in a = = b , and was defined as b = x , then we can fold
# the statements
if ( len ( rhs_defs ) == 1 and isinstance ( rhs_defs [ 0 ] , gast . Assign ) and n_reads [ rhs_defs [ 0 ] ] == 1 and isinstance ( rhs_defs [ 0 ] . value , gast . Name ) and isinstance ( rhs_defs [ 0 ] . targets [ 0 ] , gast . Name ) ) : # Mark rhs _ def for deletion
to_remove . append ( rhs_defs [ 0 ] )
# Propagate the definition
succ . value = rhs_defs [ 0 ] . value
# Remove the definitions we folded
transformers . Remove ( to_remove ) . visit ( node )
anno . clearanno ( node )
return node
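A plain before/after illustration of the rewrite this pass performs (source text only, independent of the gast/anno machinery above):

# Before assignment propagation:
#     y = x
#     z = y        # y is defined once and read exactly once
#
# After assignment propagation:
#     z = x        # the single-use intermediate assignment is folded away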
|
def client_id_from_id_token ( id_token ) :
"""Extracts the client id from a JSON Web Token ( JWT ) .
Returns a string or None ."""
|
payload = JWT ( ) . unpack ( id_token ) . payload ( )
aud = payload . get ( 'aud' , None )
if aud is None :
return None
if isinstance ( aud , list ) :
return aud [ 0 ]
return aud
|
def ofp_instruction_from_str ( ofproto , action_str ) :
"""Parse an ovs - ofctl style action string and return a list of
jsondict representations of OFPInstructionActions , which
can then be passed to ofproto _ parser . ofp _ instruction _ from _ jsondict .
Please note that this is for making transition from ovs - ofctl
easier . Please consider using OFPAction constructors when writing
new codes .
This function takes the following arguments .
Argument Description
ofproto An ofproto module .
action _ str An action string ."""
|
action_re = re . compile ( r"([a-z_]+)(\([^)]*\)|[^a-z_,()][^,()]*)*" )
result = [ ]
while len ( action_str ) :
m = action_re . match ( action_str )
if not m :
raise ryu . exception . OFPInvalidActionString ( action_str = action_str )
action_name = m . group ( 1 )
this_action = m . group ( 0 )
paren_level = this_action . count ( '(' ) - this_action . count ( ')' )
assert paren_level >= 0
try : # Parens can be nested . Look for as many ' ) ' s as ' ( ' s .
if paren_level > 0 :
this_action , rest = _tokenize_paren_block ( action_str , m . end ( 0 ) )
else :
rest = action_str [ m . end ( 0 ) : ]
if len ( rest ) :
assert rest [ 0 ] == ','
rest = rest [ 1 : ]
except Exception :
raise ryu . exception . OFPInvalidActionString ( action_str = action_str )
if action_name == 'drop' :
assert this_action == 'drop'
assert len ( result ) == 0 and rest == ''
return [ ]
converter = getattr ( OfctlActionConverter , action_name , None )
if converter is None or not callable ( converter ) :
raise ryu . exception . OFPInvalidActionString ( action_str = action_name )
result . append ( converter ( ofproto , this_action ) )
action_str = rest
return result
|
def mapping_to_frozenset ( mapping ) :
"""Be aware that this treats any sequence type with the equal members as
equal . As it is used to identify equality of schemas , this can be
considered okay as definitions are semantically equal regardless the
container type ."""
|
mapping = mapping . copy ( )
for key , value in mapping . items ( ) :
if isinstance ( value , Mapping ) :
mapping [ key ] = mapping_to_frozenset ( value )
elif isinstance ( value , Sequence ) :
value = list ( value )
for i , item in enumerate ( value ) :
if isinstance ( item , Mapping ) :
value [ i ] = mapping_to_frozenset ( item )
mapping [ key ] = tuple ( value )
return frozenset ( mapping . items ( ) )
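A quick usage sketch: two schema mappings that differ only in their sequence container type produce the same frozenset, so they compare (and hash) as equal.

a = {'type': 'list', 'allowed': [1, 2, 3]}
b = {'type': 'list', 'allowed': (1, 2, 3)}
assert mapping_to_frozenset(a) == mapping_to_frozenset(b)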
|
def add_model ( self , allow_alias = False , ** kwargs ) :
"""Add a ` Model ` instance to this entry ."""
|
if not allow_alias and MODEL . ALIAS in kwargs :
err_str = "`{}` passed in kwargs, this shouldn't happen!" . format ( MODEL . ALIAS )
self . _log . error ( err_str )
raise RuntimeError ( err_str )
# Set alias number to be + 1 of current number of models
if MODEL . ALIAS not in kwargs :
kwargs [ MODEL . ALIAS ] = str ( self . num_models ( ) + 1 )
model_obj = self . _init_cat_dict ( Model , self . _KEYS . MODELS , ** kwargs )
if model_obj is None :
return None
for item in self . get ( self . _KEYS . MODELS , '' ) :
if model_obj . is_duplicate_of ( item ) :
return item [ item . _KEYS . ALIAS ]
self . setdefault ( self . _KEYS . MODELS , [ ] ) . append ( model_obj )
return model_obj [ model_obj . _KEYS . ALIAS ]
|
def init_widget ( self ) :
"""Initialize the underlying widget ."""
|
super ( AndroidTextureView , self ) . __init__ ( self )
w = self . widget
w . setSurfaceTextureListener ( w . getId ( ) )
w . onSurfaceTextureAvailable . connect ( self . on_surface_texture_available )
w . onSurfaceTextureDestroyed . connect ( self . on_surface_texture_destroyed )
w . onSurfaceTextureChanged . connect ( self . on_surface_texture_changed )
w . onSurfaceTextureUpdated . connect ( self . on_surface_texture_updated )
|
def read ( self ) :
"""Read command result from blink ( 1 ) , low - level internal use
Receive USB Feature Report 0x01 from blink ( 1 ) with 8 - byte payload
Note : buf must be 8 bytes or bad things happen"""
|
buf = self . dev . get_feature_report ( REPORT_ID , 9 )
log . debug ( "blink1read: " + "," . join ( '0x%02x' % v for v in buf ) )
return buf
|
def alter_partition ( self , spec , location = None , format = None , tbl_properties = None , serde_properties = None , ) :
"""Change setting and parameters of an existing partition
Parameters
spec : dict or list
The partition keys for the partition being modified
location : string , optional
format : string , optional
tbl _ properties : dict , optional
serde _ properties : dict , optional
Returns
None ( for now )"""
|
part_schema = self . partition_schema ( )
def _run_ddl ( ** kwds ) :
stmt = ddl . AlterPartition ( self . _qualified_name , spec , part_schema , ** kwds )
return self . _execute ( stmt )
return self . _alter_table_helper ( _run_ddl , location = location , format = format , tbl_properties = tbl_properties , serde_properties = serde_properties , )
|
def reduce ( self , dimensions = None , function = None , spread_fn = None , ** reduce_map ) :
"""Applies reduction to elements along the specified dimension ( s ) .
Allows reducing the values along one or more key dimension
with the supplied function . Supports two signatures :
Reducing with a list of dimensions , e . g . :
ds . reduce ( [ ' x ' ] , np . mean )
Defining a reduction using keywords , e . g . :
ds . reduce ( x = np . mean )
Args :
dimensions : Dimension ( s ) to apply reduction on
Defaults to all key dimensions
function : Reduction operation to apply , e . g . numpy . mean
spreadfn : Secondary reduction to compute value spread
Useful for computing a confidence interval , spread , or
standard deviation .
* * reductions : Keyword argument defining reduction
Allows reduction to be defined as keyword pair of
dimension and function
Returns :
The Dataset after reductions have been applied ."""
|
if util . config . future_deprecations :
self . param . warning ( 'The HoloMap.reduce method is deprecated, ' 'for equivalent functionality use ' 'HoloMap.apply.reduce().collapse().' )
from . . element import Table
reduced_items = [ ( k , v . reduce ( dimensions , function , spread_fn , ** reduce_map ) ) for k , v in self . items ( ) ]
if not isinstance ( reduced_items [ 0 ] [ 1 ] , Table ) :
params = dict ( util . get_param_values ( self . last ) , kdims = self . kdims , vdims = self . last . vdims )
return Table ( reduced_items , ** params )
return Table ( self . clone ( reduced_items ) . collapse ( ) )
|
def main ( ) :
"""Remove unused imports Unsafe !
Only tested on our codebase , which uses simple absolute imports on the form , " import
a . b . c " ."""
|
parser = argparse . ArgumentParser ( description = __doc__ , formatter_class = argparse . RawDescriptionHelpFormatter )
parser . add_argument ( "path" , nargs = "+" , help = "File or directory path" )
parser . add_argument ( "--exclude" , nargs = "+" , help = "Exclude glob patterns" )
parser . add_argument ( "--no-recursive" , dest = "recursive" , action = "store_false" , help = "Search directories recursively" , )
parser . add_argument ( "--ignore-invalid" , action = "store_true" , help = "Ignore invalid paths" )
parser . add_argument ( "--pycharm" , action = "store_true" , help = "Enable PyCharm integration" )
parser . add_argument ( "--diff" , dest = "show_diff" , action = "store_true" , help = "Show diff and do not modify any files" , )
parser . add_argument ( "--dry-run" , action = "store_true" , help = "Process files but do not write results" )
parser . add_argument ( "--debug" , action = "store_true" , help = "Debug level logging" )
args = parser . parse_args ( )
d1_common . util . log_setup ( args . debug )
repo_path = d1_dev . util . find_repo_root_by_path ( __file__ )
repo = git . Repo ( repo_path )
specified_file_path_list = get_specified_file_path_list ( args )
# tracked _ path _ list = list ( d1 _ dev . util . get _ tracked _ files ( repo ) )
# format _ path _ list = sorted (
# set ( specified _ file _ path _ list ) . intersection ( tracked _ path _ list )
format_path_list = specified_file_path_list
for format_path in format_path_list :
comment_unused_imports ( args , format_path )
|
def winddir_text ( pts ) :
"Convert wind direction from 0 . . 15 to compass point text"
|
global _winddir_text_array
if pts is None :
return None
if not isinstance ( pts , int ) :
pts = int ( pts + 0.5 ) % 16
if not _winddir_text_array :
_ = pywws . localisation . translation . ugettext
_winddir_text_array = ( _ ( u'N' ) , _ ( u'NNE' ) , _ ( u'NE' ) , _ ( u'ENE' ) , _ ( u'E' ) , _ ( u'ESE' ) , _ ( u'SE' ) , _ ( u'SSE' ) , _ ( u'S' ) , _ ( u'SSW' ) , _ ( u'SW' ) , _ ( u'WSW' ) , _ ( u'W' ) , _ ( u'WNW' ) , _ ( u'NW' ) , _ ( u'NNW' ) , )
return _winddir_text_array [ pts ]
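A self-contained version of the same 16-point mapping, with the pywws localisation step dropped; the compass strings below are the untranslated defaults.

_COMPASS_16 = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
               'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')

def compass_point(pts):
    """Convert a wind direction in [0, 16) (possibly fractional) to compass text."""
    if pts is None:
        return None
    return _COMPASS_16[int(pts + 0.5) % 16]

print(compass_point(15.8))  # 'N' (wraps around past NNW)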
|
def profile_list ( ** kwargs ) :
"""Show uploaded profiles ."""
|
ctx = Context ( ** kwargs )
ctx . execute_action ( 'profile:list' , ** { 'storage' : ctx . repo . create_secure_service ( 'storage' ) , } )
|
def readline ( self , timeout = 500 ) :
"""Try to read our I / O for ' timeout ' milliseconds , return None otherwise .
This makes calling and reading I / O non blocking !"""
|
poll_result = self . poller . poll ( timeout )
if poll_result :
line = self . io . readline ( ) . strip ( )
if self . io == sys . stdin and line == "[" : # skip first event line wrt issue # 19
line = self . io . readline ( ) . strip ( )
try : # python3 compatibility code
line = line . decode ( )
except ( AttributeError , UnicodeDecodeError ) :
pass
return line
else :
return None
|
def scanf ( format , s = None , collapseWhitespace = True ) :
"""scanf supports the following formats :
% c One character
%5c 5 characters
% d , % i int value
%7d , % 7i int value with length 7
% f float value
% o octal value
% X , % x hex value
% s string terminated by whitespace
Examples :
> > > scanf ( " % s - % d errors , % d warnings " , " / usr / sbin / sendmail - 0 errors , 4 warnings " )
( ' / usr / sbin / sendmail ' , 0 , 4)
> > > scanf ( " % o % x % d " , " 0123 0x123 123 " )
(83 , 291 , 123)
scanf . scanf returns a tuple of found values
or None if the format does not match ."""
|
if s is None :
s = sys . stdin
if hasattr ( s , "readline" ) :
s = s . readline ( )
format_re , casts = scanf_compile ( format , collapseWhitespace )
found = format_re . search ( s )
if found :
groups = found . groups ( )
return tuple ( [ casts [ i ] ( groups [ i ] ) for i in range ( len ( groups ) ) ] )
|
def read_10x_mtx ( path , var_names = 'gene_symbols' , make_unique = True , cache = False , gex_only = True ) -> AnnData :
"""Read 10x - Genomics - formatted mtx directory .
Parameters
path : ` str `
Path to directory for ` . mtx ` and ` . tsv ` files ,
e . g . ' . / filtered _ gene _ bc _ matrices / hg19 / ' .
var _ names : { ' gene _ symbols ' , ' gene _ ids ' } , optional ( default : ' gene _ symbols ' )
The variables index .
make _ unique : ` bool ` , optional ( default : ` True ` )
Whether to make the variables index unique by appending ' - 1 ' ,
' - 2 ' etc . or not .
cache : ` bool ` , optional ( default : ` False ` )
If ` False ` , read from source , if ` True ` , read from fast ' h5ad ' cache .
gex _ only : ` bool ` , optional ( default : ` True ` )
Only keep ' Gene Expression ' data and ignore other feature types ,
e . g . ' Antibody Capture ' , ' CRISPR Guide Capture ' , or ' Custom '
Returns
An : class : ` ~ anndata . AnnData ` object"""
|
path = Path ( path )
genefile_exists = ( path / 'genes.tsv' ) . is_file ( )
read = _read_legacy_10x_mtx if genefile_exists else _read_v3_10x_mtx
adata = read ( str ( path ) , var_names = var_names , make_unique = make_unique , cache = cache , )
if genefile_exists or not gex_only :
return adata
else :
gex_rows = list ( map ( lambda x : x == 'Gene Expression' , adata . var [ 'feature_types' ] ) )
return adata [ : , gex_rows ]
|
def setup_default_wrappers ( self ) :
"""Setup defaulf wrappers .
Wrappers are applied when view method does not return instance
of Response . In this case nefertari renderers call wrappers and
handle response generation ."""
|
# Index
self . _after_calls [ 'index' ] = [ wrappers . wrap_in_dict ( self . request ) , wrappers . add_meta ( self . request ) , wrappers . add_object_url ( self . request ) , ]
# Show
self . _after_calls [ 'show' ] = [ wrappers . wrap_in_dict ( self . request ) , wrappers . add_meta ( self . request ) , wrappers . add_object_url ( self . request ) , ]
# Create
self . _after_calls [ 'create' ] = [ wrappers . wrap_in_dict ( self . request ) , wrappers . add_meta ( self . request ) , wrappers . add_object_url ( self . request ) , ]
# Update
self . _after_calls [ 'update' ] = [ wrappers . wrap_in_dict ( self . request ) , wrappers . add_meta ( self . request ) , wrappers . add_object_url ( self . request ) , ]
# Replace
self . _after_calls [ 'replace' ] = [ wrappers . wrap_in_dict ( self . request ) , wrappers . add_meta ( self . request ) , wrappers . add_object_url ( self . request ) , ]
# Privacy wrappers
if self . _auth_enabled :
for meth in ( 'index' , 'show' , 'create' , 'update' , 'replace' ) :
self . _after_calls [ meth ] += [ wrappers . apply_privacy ( self . request ) , ]
for meth in ( 'update' , 'replace' , 'update_many' ) :
self . _before_calls [ meth ] += [ wrappers . apply_request_privacy ( self . Model , self . _json_params ) , ]
|
def exception ( self , ncode ) :
"""Looks up the exception in error _ dictionary and raises it .
Required arguments :
* ncode - Error numerical code ."""
|
error = self . error_dictionary [ ncode ]
error_msg = self . _buffer [ self . _index - 1 ] . split ( None , 3 ) [ 3 ]
exec ( 'raise self.%s("%s: %s")' % ( error , error , error_msg ) )
|
def get_global_compatibility_level ( self ) :
"""Gets the global compatibility level ."""
|
res = requests . get ( self . _url ( '/config' ) , headers = HEADERS )
raise_if_failed ( res )
return res . json ( ) [ 'compatibility' ]
|
def scale_T ( T , P_I , I_F ) :
"""Scale T with a block diagonal matrix .
Helper function that scales T with a right multiplication by a block
diagonal inverse , so that T is the identity at C - node rows .
Parameters
T : { bsr _ matrix }
Tentative prolongator , with square blocks in the BSR data structure ,
and a non - overlapping block - diagonal structure
P _ I : { bsr _ matrix }
Interpolation operator that carries out only simple injection from the
coarse grid to fine grid Cpts nodes
I _ F : { bsr _ matrix }
Identity operator on Fpts , i . e . , the action of this matrix zeros
out entries in a vector at all Cpts , leaving Fpts untouched
Returns
T : { bsr _ matrix }
Tentative prolongator scaled to be identity at C - pt nodes
Examples
> > > from scipy . sparse import csr _ matrix , bsr _ matrix
> > > from scipy import matrix , array
> > > from pyamg . util . utils import scale _ T
> > > T = matrix ( [ [ 1.0 , 0 . , 0 . ] ,
. . . [ 0.5 , 0 . , 0 . ] ,
. . . [ 0 . , 1 . , 0 . ] ,
. . . [ 0 . , 0.5 , 0 . ] ,
. . . [ 0 . , 0 . , 1 . ] ,
. . . [ 0 . , 0 . , 0.25 ] ] )
> > > P _ I = matrix ( [ [ 0 . , 0 . , 0 . ] ,
. . . [ 1 . , 0 . , 0 . ] ,
. . . [ 0 . , 1 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . ] ,
. . . [ 0 . , 0 . , 1 . ] ] )
> > > I _ F = matrix ( [ [ 1 . , 0 . , 0 . , 0 . , 0 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . , 0 . , 0 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . , 0 . , 0 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . , 1 . , 0 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . , 0 . , 1 . , 0 . ] ,
. . . [ 0 . , 0 . , 0 . , 0 . , 0 . , 0 . ] ] )
> > > scale _ T ( bsr _ matrix ( T ) , bsr _ matrix ( P _ I ) , bsr _ matrix ( I _ F ) ) . todense ( )
matrix ( [ [ 2 . , 0 . , 0 . ] ,
[ 1 . , 0 . , 0 . ] ,
[ 0 . , 1 . , 0 . ] ,
[ 0 . , 0.5 , 0 . ] ,
[ 0 . , 0 . , 4 . ] ,
[ 0 . , 0 . , 1 . ] ] )
Notes
This routine is primarily used in
pyamg . aggregation . smooth . energy _ prolongation _ smoother , where it is used to
generate a suitable initial guess for the energy - minimization process , when
root - node style SA is used . This function , scale _ T , takes an existing
tentative prolongator and ensures that it injects from the coarse - grid to
fine - grid root - nodes .
When generating initial guesses for root - node style prolongation operators ,
this function is usually called after pyamg . uti . utils . filter _ operator
This function assumes that the eventual coarse - grid nullspace vectors
equal coarse - grid injection applied to the fine - grid nullspace vectors ."""
|
if not isspmatrix_bsr ( T ) :
raise TypeError ( 'Expected BSR matrix T' )
elif T . blocksize [ 0 ] != T . blocksize [ 1 ] :
raise TypeError ( 'Expected BSR matrix T with square blocks' )
if not isspmatrix_bsr ( P_I ) :
raise TypeError ( 'Expected BSR matrix P_I' )
elif P_I . blocksize [ 0 ] != P_I . blocksize [ 1 ] :
raise TypeError ( 'Expected BSR matrix P_I with square blocks' )
if not isspmatrix_bsr ( I_F ) :
raise TypeError ( 'Expected BSR matrix I_F' )
elif I_F . blocksize [ 0 ] != I_F . blocksize [ 1 ] :
raise TypeError ( 'Expected BSR matrix I_F with square blocks' )
if ( I_F . blocksize [ 0 ] != P_I . blocksize [ 0 ] ) or ( I_F . blocksize [ 0 ] != T . blocksize [ 0 ] ) :
raise TypeError ( 'Expected identical blocksize in I_F, P_I and T' )
# Only do if we have a non - trivial coarse - grid
if P_I . nnz > 0 : # Construct block diagonal inverse D
D = P_I . T * T
if D . nnz > 0 : # changes D in place
pinv_array ( D . data )
# Scale T to be identity at root - nodes
T = T * D
# Ensure coarse - grid injection
T = I_F * T + P_I
return T
|
def astype ( self , dtype ) :
"""Cast DataFrame columns to given dtype .
Parameters
dtype : numpy . dtype or dict
Dtype or column _ name - > dtype mapping to cast columns to . Note index is excluded .
Returns
DataFrame
With casted columns ."""
|
if isinstance ( dtype , np . dtype ) :
new_data = OrderedDict ( ( column . name , column . astype ( dtype ) ) for column in self . _iter ( ) )
return DataFrame ( new_data , self . index )
elif isinstance ( dtype , dict ) :
check_inner_types ( dtype . values ( ) , np . dtype )
new_data = OrderedDict ( self . _data )
for column in self . _iter ( ) :
column_name = column . name
if column_name in dtype :
new_data [ column_name ] = column . astype ( dtype [ column_name ] )
return DataFrame ( new_data , self . index )
else :
raise TypeError ( 'Expected numpy.dtype or dict mapping column names to dtypes' )
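A hedged usage sketch of the two accepted argument forms; `df` stands for an instance of this DataFrame class (not pandas) and the column names are invented.

import numpy as np

df_all_float = df.astype(np.dtype('float64'))                 # cast every column
df_mixed = df.astype({'age': np.dtype('int64'),               # cast selected columns by name
                      'score': np.dtype('float32')})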
|
def build_D3treeStandard ( old , MAX_DEPTH , level = 1 , toplayer = None ) :
"""For d3s examples all we need is a json with name , children and size . . eg
" name " : " flare " ,
" children " : [
" name " : " analytics " ,
" children " : [
" name " : " cluster " ,
" children " : [
{ " name " : " AgglomerativeCluster " , " size " : 3938 } ,
{ " name " : " CommunityStructure " , " size " : 3812 } ,
{ " name " : " HierarchicalCluster " , " size " : 6714 } ,
{ " name " : " MergeEdge " , " size " : 743}
etc . . ."""
|
out = [ ]
if not old :
old = toplayer
for x in old :
d = { }
# print " * " * level , x . label
d [ 'qname' ] = x . qname
d [ 'name' ] = x . bestLabel ( quotes = False ) . replace ( "_" , " " )
d [ 'objid' ] = x . id
if x . children ( ) and level < MAX_DEPTH :
d [ 'size' ] = len ( x . children ( ) ) + 5
# fake size
d [ 'realsize' ] = len ( x . children ( ) )
# real size
d [ 'children' ] = build_D3treeStandard ( x . children ( ) , MAX_DEPTH , level + 1 )
else :
d [ 'size' ] = 1
# default size
d [ 'realsize' ] = 0
# default size
out += [ d ]
return out
|
def sample ( self , N = 1 ) :
"""Sample N trajectories from the posterior .
Note
Performs the forward step in case it has not been performed ."""
|
if not self . filt :
self . forward ( )
paths = np . empty ( ( len ( self . filt ) , N ) , np . int )
paths [ - 1 , : ] = rs . multinomial ( self . filt [ - 1 ] , M = N )
log_trans = np . log ( self . hmm . trans_mat )
for t , f in reversed ( list ( enumerate ( self . filt [ : - 1 ] ) ) ) :
for n in range ( N ) :
probs = rs . exp_and_normalise ( log_trans [ : , paths [ t + 1 , n ] ] + np . log ( f ) )
paths [ t , n ] = rs . multinomial_once ( probs )
return paths
|
def host ( self , hostname , owner = None , ** kwargs ) :
"""Create the Host TI object .
Args :
owner :
hostname :
* * kwargs :
Return :"""
|
return Host ( self . tcex , hostname , owner = owner , ** kwargs )
|
def get_age_levels ( self ) :
"""Method to add a " level " column to the ages table .
Finds the lowest filled in level ( i . e . , specimen , sample , etc . )
for that particular row .
I . e . , a row with both site and sample name filled in is considered
a sample - level age .
Returns
self . tables [ ' ages ' ] : MagicDataFrame
updated ages table"""
|
def get_level ( ser , levels = ( 'specimen' , 'sample' , 'site' , 'location' ) ) :
for level in levels :
if pd . notnull ( ser [ level ] ) :
if len ( ser [ level ] ) : # guard against empty strings
return level
return
# get available levels in age table
possible_levels = [ 'specimen' , 'sample' , 'site' , 'location' ]
levels = [ level for level in possible_levels if level in self . tables [ 'ages' ] . df . columns ]
# find level for each age row
age_levels = self . tables [ 'ages' ] . df . apply ( get_level , axis = 1 , args = [ levels ] )
if any ( age_levels ) :
self . tables [ 'ages' ] . df . loc [ : , 'level' ] = age_levels
return self . tables [ 'ages' ]
|
def _unescape_token ( escaped_token ) :
"""Inverse of _ escape _ token ( ) .
Args :
escaped _ token : a unicode string
Returns :
token : a unicode string"""
|
def match ( m ) :
if m . group ( 1 ) is None :
return u"_" if m . group ( 0 ) == u"\\u" else u"\\"
try :
return six . unichr ( int ( m . group ( 1 ) ) )
except ( ValueError , OverflowError ) :
return ""
trimmed = escaped_token [ : - 1 ] if escaped_token . endswith ( "_" ) else escaped_token
return _UNESCAPE_REGEX . sub ( match , trimmed )
|
def post_message ( plugin , polled_time , identity , message ) :
"""Post single message
: type plugin : errbot . BotPlugin
: type polled _ time : datetime . datetime
: type identity : str
: type message : str"""
|
user = plugin . build_identifier ( identity )
return plugin . send ( user , message )
|
def guinieranalysis ( samplenames , qranges = None , qmax_from_shanum = True , prfunctions_postfix = '' , dist = None , plotguinier = True , graph_extension = '.png' , dmax = None , dmax_from_shanum = False ) :
"""Perform Guinier analysis on the samples .
Inputs :
samplenames : list of sample names
qranges : dictionary of q ranges for each sample . The keys are sample names . The special ' _ _ default _ _ ' key
corresponds to all samples which do not have a key in the dict .
qmax _ from _ shanum : use the qmax determined by the shanum program for the GNOM input .
prfunctions _ postfix : The figure showing the P ( r ) functions will be saved as
prfunctions _ < prfunctions _ postfix > < graph _ extension >
dist : the sample - to - detector distance to use .
plotguinier : if Guinier plots are needed .
graph _ extension : the extension of the saved graph image files .
dmax : Dict of Dmax parameters . If not found or None , determine automatically using DATGNOM . If found ,
GNOM is used . The special key ' _ _ default _ _ ' works in a similar fashion as for ` qranges ` ."""
|
figpr = plt . figure ( )
ip = get_ipython ( )
axpr = figpr . add_subplot ( 1 , 1 , 1 )
if qranges is None :
qranges = { '__default__' : ( 0 , 1000000 ) }
if dmax is None :
dmax = { '__default__' : None }
if '__default__' not in qranges :
qranges [ '__default__' ] = ( 0 , 1000000 )
if '__default__' not in dmax :
dmax [ '__default__' ] = None
table_autorg = [ [ 'Name' , 'Rg (nm)' , 'I$_0$ (cm$^{-1}$ sr$^{-1}$)' , 'q$_{min}$ (nm$^{-1}$)' , 'q$_{max}$ (nm$^{-1}$)' , 'qmin*Rg' , 'qmax*Rg' , 'quality' , 'aggregation' , 'Dmax (nm)' , 'q$_{shanum}$ (nm$^{-1}$)' ] ]
table_gnom = [ [ 'Name' , 'Rg (nm)' , 'I$_0$ (cm$^{-1}$ sr$^{-1}$)' , 'qmin (nm$^{-1}$)' , 'qmax (nm$^{-1}$)' , 'Dmin (nm)' , 'Dmax (nm)' , 'Total estimate' , 'Porod volume (nm$^3$)' ] ]
results = { }
for sn in samplenames :
if sn not in qranges :
print ( 'Q-range not given for sample {}: using default one' . format ( sn ) )
qrange = qranges [ '__default__' ]
else :
qrange = qranges [ sn ]
if sn not in dmax :
dmax_ = dmax [ '__default__' ]
else :
dmax_ = dmax [ sn ]
print ( 'Using q-range for sample {}: {} <= q <= {}' . format ( sn , qrange [ 0 ] , qrange [ 1 ] ) )
curve = getsascurve ( sn , dist ) [ 0 ] . trim ( * qrange ) . sanitize ( )
curve . save ( sn + '.dat' )
try :
Rg , I0 , qmin , qmax , quality , aggregation = autorg ( sn + '.dat' )
except ValueError :
print ( 'Error running autorg on %s' % sn )
continue
dmax_shanum , nsh , nopt , qmaxopt = shanum ( sn + '.dat' )
if qmax_from_shanum :
curve_trim = curve . trim ( qmin , qmaxopt )
else :
curve_trim = curve . trim ( qmin , qrange [ 1 ] )
if dmax_from_shanum :
dmax_ = dmax_shanum
curve_trim . save ( sn + '_optrange.dat' )
if dmax_ is None :
print ( 'Calling DATGNOM for sample {} with Rg={}, q-range from {} to {}' . format ( sn , Rg . val , curve_trim . q . min ( ) , curve_trim . q . max ( ) ) )
gnompr , metadata = datgnom ( sn + '_optrange.dat' , Rg = Rg . val , noprint = True )
else :
print ( 'Calling GNOM for sample {} with Rmax={}, q-range from {} to {}' . format ( sn , dmax_ , curve_trim . q . min ( ) , curve_trim . q . max ( ) ) )
gnompr , metadata = gnom ( curve_trim , dmax_ )
rg , i0 , vporod = datporod ( sn + '_optrange.out' )
axpr . errorbar ( gnompr [ : , 0 ] , gnompr [ : , 1 ] , gnompr [ : , 2 ] , None , label = sn )
if plotguinier :
figsample = plt . figure ( )
axgnomfit = figsample . add_subplot ( 1 , 2 , 1 )
curve . errorbar ( 'b.' , axes = axgnomfit , label = 'measured' )
axgnomfit . errorbar ( metadata [ 'qj' ] , metadata [ 'jexp' ] , metadata [ 'jerror' ] , None , 'g.' , label = 'gnom input' )
axgnomfit . loglog ( metadata [ 'qj' ] , metadata [ 'jreg' ] , 'r-' , label = 'regularized by GNOM' )
figsample . suptitle ( sn )
axgnomfit . set_xlabel ( 'q (nm$^{-1}$)' )
axgnomfit . set_ylabel ( '$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)' )
axgnomfit . axvline ( qmaxopt , 0 , 1 , linestyle = 'dashed' , color = 'black' , lw = 2 )
axgnomfit . grid ( True , which = 'both' )
axgnomfit . axis ( 'tight' )
axgnomfit . legend ( loc = 'best' )
axguinier = figsample . add_subplot ( 1 , 2 , 2 )
axguinier . errorbar ( curve . q , curve . Intensity , curve . Error , curve . qError , '.' , label = 'Measured' )
q = np . linspace ( qmin , qmax , 100 )
axguinier . plot ( q , I0 . val * np . exp ( - q ** 2 * Rg . val ** 2 / 3 ) , label = 'AutoRg' )
axguinier . plot ( q , metadata [ 'I0_gnom' ] . val * np . exp ( - q ** 2 * metadata [ 'Rg_gnom' ] . val ** 2 / 3 ) , label = 'Gnom' )
axguinier . set_xscale ( 'power' , exponent = 2 )
axguinier . set_yscale ( 'log' )
axguinier . set_xlabel ( 'q (nm$^{-1}$)' )
axguinier . set_ylabel ( '$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)' )
axguinier . legend ( loc = 'best' )
idxmin = np . arange ( len ( curve ) ) [ curve . q <= qmin ] . max ( )
idxmax = np . arange ( len ( curve ) ) [ curve . q >= qmax ] . min ( )
idxmin = max ( 0 , idxmin - 5 )
idxmax = min ( len ( curve ) - 1 , idxmax + 5 )
if plotguinier :
curveguinier = curve . trim ( curve . q [ idxmin ] , curve . q [ idxmax ] )
axguinier . axis ( xmax = curve . q [ idxmax ] , xmin = curve . q [ idxmin ] , ymin = curveguinier . Intensity . min ( ) , ymax = curveguinier . Intensity . max ( ) )
axguinier . grid ( True , which = 'both' )
table_gnom . append ( [ sn , metadata [ 'Rg_gnom' ] . tostring ( extra_digits = 2 ) , metadata [ 'I0_gnom' ] . tostring ( extra_digits = 2 ) , metadata [ 'qmin' ] , metadata [ 'qmax' ] , metadata [ 'dmin' ] , metadata [ 'dmax' ] , metadata [ 'totalestimate_corrected' ] , vporod ] )
table_autorg . append ( [ sn , Rg . tostring ( extra_digits = 2 ) , I0 , '%.3f' % qmin , '%.3f' % qmax , qmin * Rg , qmax * Rg , '%.1f %%' % ( quality * 100 ) , aggregation , '%.3f' % dmax_shanum , '%.3f' % qmaxopt ] )
if plotguinier :
figsample . tight_layout ( )
figsample . savefig ( os . path . join ( ip . user_ns [ 'auximages_dir' ] , 'guinier_%s%s' % ( sn , graph_extension ) ) , dpi = 600 )
results [ sn ] = { 'Rg_autorg' : Rg , 'I0_autorg' : I0 , 'qmin_autorg' : qmin , 'qmax_autorg' : qmax , 'quality' : quality , 'aggregation' : aggregation , 'dmax_autorg' : dmax_shanum , 'qmax_shanum' : qmaxopt , 'Rg_gnom' : metadata [ 'Rg_gnom' ] , 'I0_gnom' : metadata [ 'I0_gnom' ] , 'qmin_gnom' : metadata [ 'qmin' ] , 'qmax_gnom' : metadata [ 'qmax' ] , 'dmin_gnom' : metadata [ 'dmin' ] , 'dmax_gnom' : metadata [ 'dmax' ] , 'VPorod' : vporod , }
axpr . set_xlabel ( 'r (nm)' )
axpr . set_ylabel ( 'P(r)' )
axpr . legend ( loc = 'best' )
axpr . grid ( True , which = 'both' )
writemarkdown ( '## Results from autorg and shanum' )
tab = ipy_table . IpyTable ( table_autorg )
tab . apply_theme ( 'basic' )
display ( tab )
writemarkdown ( '## Results from gnom' )
tab = ipy_table . IpyTable ( table_gnom )
tab . apply_theme ( 'basic' )
if prfunctions_postfix and prfunctions_postfix [ 0 ] != '_' :
prfunctions_postfix = '_' + prfunctions_postfix
figpr . tight_layout ( )
figpr . savefig ( os . path . join ( ip . user_ns [ 'auximages_dir' ] , 'prfunctions%s%s' % ( prfunctions_postfix , graph_extension ) ) , dpi = 600 )
display ( tab )
return results
|
def format_measure ( measure ) :
"""Get format and units for data coming from profiler task ."""
|
# Convert to a positive value .
measure = abs ( measure )
# For number of calls
if isinstance ( measure , int ) :
return to_text_string ( measure )
# For time measurements
if 1.e-9 < measure <= 1.e-6 :
measure = u"{0:.2f} ns" . format ( measure / 1.e-9 )
elif 1.e-6 < measure <= 1.e-3 :
measure = u"{0:.2f} us" . format ( measure / 1.e-6 )
elif 1.e-3 < measure <= 1 :
measure = u"{0:.2f} ms" . format ( measure / 1.e-3 )
elif 1 < measure <= 60 :
measure = u"{0:.2f} sec" . format ( measure )
elif 60 < measure <= 3600 :
m , s = divmod ( measure , 60 )
if s > 60 :
m , s = divmod ( measure , 60 )
s = to_text_string ( s ) . split ( "." ) [ - 1 ]
measure = u"{0:.0f}.{1:.2s} min" . format ( m , s )
else :
h , m = divmod ( measure , 3600 )
if m > 60 :
m /= 60
measure = u"{0:.0f}h:{1:.0f}min" . format ( h , m )
return measure
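A few illustrative calls, assuming to_text_string simply converts its argument to a unicode string (an assumption about the surrounding compatibility helpers, not taken from this file):

# format_measure(3)       -> u"3"         (integer input: interpreted as a call count)
# format_measure(2.5e-3)  -> u"2.50 ms"   (1 ms < t <= 1 s branch)
# format_measure(42.0)    -> u"42.00 sec" (1 s  < t <= 60 s branch)
# format_measure(-0.5)    -> u"500.00 ms" (sign is dropped via abs() first)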
|
def same_unit ( self , other : Union [ UnitTypeId , Set [ UnitTypeId ] , List [ UnitTypeId ] , Dict [ UnitTypeId , Any ] ] ) -> "Units" :
"""Usage :
' self . units . same _ unit ( UnitTypeId . COMMANDCENTER ) '
returns CommandCenter and CommandCenterFlying ,
' self . units . same _ unit ( UnitTypeId . ORBITALCOMMAND ) '
returns OrbitalCommand and OrbitalCommandFlying
This also works with a set / list / dict parameter , e . g . ' self . units . same _ unit ( { UnitTypeId . COMMANDCENTER , UnitTypeId . SUPPLYDEPOT } ) '
Untested : This should return the equivalents for WarpPrism , Observer , Overseer , SupplyDepot and others"""
|
if isinstance ( other , UnitTypeId ) :
other = { other }
unit_alias_types = set ( other )
for unitType in other :
unit_alias = self . game_data . units [ unitType . value ] . unit_alias
if unit_alias :
unit_alias_types . add ( unit_alias )
return self . filter ( lambda unit : unit . type_id in unit_alias_types or unit . _type_data . unit_alias is not None and unit . _type_data . unit_alias in unit_alias_types )
|
def get_expression_engine ( self , name ) :
"""Return an expression engine instance ."""
|
try :
return self . expression_engines [ name ]
except KeyError :
raise InvalidEngineError ( "Unsupported expression engine: {}" . format ( name ) )
|
def _configure_logger_handler ( cls , log_dest , log_filename ) :
"""Return a logging handler for the specified ` log _ dest ` , or ` None ` if
` log _ dest ` is ` None ` ."""
|
if log_dest is None :
return None
msg_format = '%(asctime)s-%(name)s-%(message)s'
if log_dest == 'stderr' : # Note : sys . stderr is the default stream for StreamHandler
handler = logging . StreamHandler ( )
handler . setFormatter ( logging . Formatter ( msg_format ) )
elif log_dest == 'file' :
if not log_filename :
raise ValueError ( "Log filename is required if log destination " "is 'file'" )
handler = logging . FileHandler ( log_filename , encoding = "UTF-8" )
handler . setFormatter ( logging . Formatter ( msg_format ) )
else :
raise ValueError ( _format ( "Invalid log destination: {0!A}; Must be one of: " "{1!A}" , log_dest , LOG_DESTINATIONS ) )
return handler
|
def rsa_public_key_pkcs8_to_pkcs1 ( pkcs8_key ) :
"""Convert a PKCS8 - encoded RSA private key to PKCS1."""
|
decoded_values = decoder . decode ( pkcs8_key , asn1Spec = PublicKeyInfo ( ) )
try :
decoded_key = decoded_values [ 0 ]
except IndexError :
raise ValueError ( "Invalid public key encoding." )
return decoded_key [ "publicKey" ] . asOctets ( )
|
def gelman_rubin ( x , return_var = False ) :
"""Returns estimate of R for a set of traces .
The Gelman - Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain .
If convergence has been achieved , the between - chain and within - chain
variances should be identical . To be most effective in detecting evidence
for nonconvergence , each chain should have been initialized to starting
values that are dispersed relative to the target distribution .
Parameters
x : array - like
An array containing the 2 or more traces of a stochastic parameter . That is , an array of dimension m x n x k , where m is the number of traces , n the number of samples , and k the dimension of the stochastic .
return _ var : bool
Flag for returning the marginal posterior variance instead of R - hat ( defaults to False ) .
Returns
Rhat : float
Return the potential scale reduction factor , : math : ` \ hat { R } `
Notes
The diagnostic is computed by :
. . math : : \ hat { R } = \ sqrt { \ frac { \ hat { V } } { W } }
where : math : ` W ` is the within - chain variance and : math : ` \ hat { V } ` is
the posterior variance estimate for the pooled traces . This is the
potential scale reduction factor , which converges to unity when each
of the traces is a sample from the target posterior . Values greater
than one indicate that one or more chains have not yet converged .
References
Brooks and Gelman ( 1998)
Gelman and Rubin ( 1992)"""
|
if np . shape ( x ) < ( 2 , ) :
raise ValueError ( 'Gelman-Rubin diagnostic requires multiple chains of the same length.' )
try :
m , n = np . shape ( x )
except ValueError :
return [ gelman_rubin ( np . transpose ( y ) ) for y in np . transpose ( x ) ]
# Calculate between - chain variance
B_over_n = np . sum ( ( np . mean ( x , 1 ) - np . mean ( x ) ) ** 2 ) / ( m - 1 )
# Calculate within - chain variances
W = np . sum ( [ ( x [ i ] - xbar ) ** 2 for i , xbar in enumerate ( np . mean ( x , 1 ) ) ] ) / ( m * ( n - 1 ) )
# ( over ) estimate of variance
s2 = W * ( n - 1 ) / n + B_over_n
if return_var :
return s2
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return np . sqrt ( R )
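A quick usage sketch on two synthetic chains drawn from the same distribution, where the potential scale reduction factor should be close to one:

import numpy as np

rng = np.random.RandomState(0)
chains = rng.normal(size=(2, 1000))        # m=2 chains, n=1000 samples each

r_hat = gelman_rubin(chains)               # ~1.0 for well-mixed chains
pooled_var = gelman_rubin(chains, return_var=True)  # pooled posterior variance estimate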
|
def close_socket ( self ) :
"""close socket"""
|
self . _socket_lock . acquire ( )
self . _force_close_session ( )
self . _socket_lock . release ( )
|
def _get ( self , * args , ** kwargs ) :
"""Make a GET request ."""
|
return self . _request ( requests . get , * args , ** kwargs )
|
def get_dimord ( measure , calc = None , community = None ) :
"""Get the dimension order of a network measure .
Parameters
measure : str
Name of function in teneto . networkmeasures .
calc : str , default = None
Calc parameter for the function
community : bool , default = None
If not None , the measure is assumed to be calculated per community .
Returns
dimord : str
Dimension order . So " node , node , time " would define the dimensions of the network measure ."""
|
if not calc :
calc = ''
else :
calc = '_' + calc
if not community :
community = ''
else :
community = 'community'
if 'community' in calc and 'community' in community :
community = ''
if calc == 'community_avg' or calc == 'community_pairs' :
community = ''
dimord_dict = { 'temporal_closeness_centrality' : 'node' , 'temporal_degree_centrality' : 'node' , 'temporal_degree_centrality_avg' : 'node' , 'temporal_degree_centrality_time' : 'node,time' , 'temporal_efficiency' : 'global' , 'temporal_efficiency_global' : 'global' , 'temporal_efficiency_node' : 'node' , 'temporal_efficiency_to' : 'node' , 'sid_global' : 'global,time' , 'community_pairs' : 'community,community,time' , 'community_avg' : 'community,time' , 'sid' : 'community,community,time' , 'reachability_latency_global' : 'global' , 'reachability_latency' : 'global' , 'reachability_latency_node' : 'node' , 'fluctuability' : 'node' , 'fluctuability_global' : 'global' , 'bursty_coeff' : 'edge,edge' , 'bursty_coeff_edge' : 'edge,edge' , 'bursty_coeff_node' : 'node' , 'bursty_coeff_meanEdgePerNode' : 'node' , 'volatility_global' : 'time' , }
if measure + calc + community in dimord_dict :
return dimord_dict [ measure + calc + community ]
else :
print ( 'WARNING: get_dimord() returned unknown dimension labels' )
return 'unknown'
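A few illustrative lookups against the mapping above:

# get_dimord('temporal_degree_centrality')   -> 'node'
# get_dimord('volatility', calc='global')    -> 'time'
# get_dimord('bursty_coeff', calc='edge')    -> 'edge,edge'
# get_dimord('sid', calc='global')           -> 'global,time'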
|
def _usernamesToSidObjects ( cls , val , ** kwargs ) :
'''converts a list of usernames to sid objects'''
|
if not val :
return val
if isinstance ( val , six . string_types ) :
val = val . split ( ',' )
sids = [ ]
for _user in val :
try :
sid = win32security . LookupAccountName ( '' , _user ) [ 0 ]
sids . append ( sid )
# This needs to be more specific
except Exception as e :
log . exception ( 'Handle this explicitly' )
raise CommandExecutionError ( ( 'There was an error obtaining the SID of user "{0}". Error ' 'returned: {1}' ) . format ( _user , e ) )
return sids
|
def input_loop ( ) :
'''wait for user input'''
|
global operation_takeoff
global time_init_operation_takeoff
global time_end_operation_takeoff
while mpstate . status . exit != True :
try :
if mpstate . status . exit != True :
if mpstate . udp . bound ( ) :
line = mpstate . udp . readln ( )
mpstate . udp . writeln ( line )
elif mpstate . tcp . connected ( ) :
line = mpstate . tcp . readln ( )
mpstate . tcp . writeln ( line )
else :
line = input ( mpstate . rl . prompt )
if line == 'takeoff' :
print ( "Detecto takeoff" )
operation_takeoff = True
time_init_operation_takeoff = int ( round ( time . time ( ) * 1000 ) )
time_end_operation_takeoff = time_init_operation_takeoff + 5000
print ( time_end_operation_takeoff )
mpstate . input_queue . put ( "arm throttle" )
return
if line == 'land' :
print ( "Orden de aterrizar" )
on_air = False
except EOFError :
mpstate . status . exit = True
sys . exit ( 1 )
mpstate . input_queue . put ( line )
|
def submit ( self , data , runtime_dir , argv ) :
"""Run process with SLURM .
For details , see
: meth : ` ~ resolwe . flow . managers . workload _ connectors . base . BaseConnector . submit ` ."""
|
limits = data . process . get_resource_limits ( )
logger . debug ( __ ( "Connector '{}' running for Data with id {} ({})." , self . __class__ . __module__ , data . id , repr ( argv ) ) )
# Compute target partition .
partition = getattr ( settings , 'FLOW_SLURM_PARTITION_DEFAULT' , None )
if data . process . slug in getattr ( settings , 'FLOW_SLURM_PARTITION_OVERRIDES' , { } ) :
partition = settings . FLOW_SLURM_PARTITION_OVERRIDES [ data . process . slug ]
try : # Make sure the resulting file is executable on creation .
script_path = os . path . join ( runtime_dir , 'slurm.sh' )
file_descriptor = os . open ( script_path , os . O_WRONLY | os . O_CREAT , mode = 0o555 )
with os . fdopen ( file_descriptor , 'wt' ) as script :
script . write ( '#!/bin/bash\n' )
script . write ( '#SBATCH --mem={}M\n' . format ( limits [ 'memory' ] + EXECUTOR_MEMORY_OVERHEAD ) )
script . write ( '#SBATCH --cpus-per-task={}\n' . format ( limits [ 'cores' ] ) )
if partition :
script . write ( '#SBATCH --partition={}\n' . format ( partition ) )
# Render the argument vector into a command line .
line = ' ' . join ( map ( shlex . quote , argv ) )
script . write ( line + '\n' )
command = [ '/usr/bin/env' , 'sbatch' , script_path ]
subprocess . Popen ( command , cwd = runtime_dir , stdin = subprocess . DEVNULL ) . wait ( )
except OSError as err :
logger . error ( __ ( "OSError occurred while preparing SLURM script for Data {}: {}" , data . id , err ) )
|
def rows ( self ) :
"""Return / yield tuples or lists corresponding to each row to be inserted ."""
|
with self . input ( ) . open ( 'r' ) as fobj :
for line in fobj :
yield line . strip ( '\n' ) . split ( '\t' )
|
def is_sparse_vector ( x ) :
"""x is a 2D sparse matrix with it ' s first shape equal to 1."""
|
return sp . issparse ( x ) and len ( x . shape ) == 2 and x . shape [ 0 ] == 1
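A short check of the predicate on a few representative inputs:

import numpy as np
import scipy.sparse as sp

row = sp.csr_matrix(np.array([[0.0, 1.0, 0.0]]))  # sparse, shape (1, 3)
mat = sp.csr_matrix(np.eye(3))                    # sparse, shape (3, 3)
dense = np.zeros((1, 3))                          # dense, shape (1, 3)

assert is_sparse_vector(row)
assert not is_sparse_vector(mat)      # more than one row
assert not is_sparse_vector(dense)    # not a sparse matrix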
|
def max_tab_name_length_changed ( self , settings , key , user_data ) :
"""If the gconf var max _ tab _ name _ length be changed , this method will
be called and will set the tab name length limit ."""
|
# avoid get window title before terminal is ready
if self . guake . notebook_manager . get_current_notebook ( ) . get_current_terminal ( ) is None :
return
# avoid get window title before terminal is ready
if self . guake . notebook_manager . get_current_notebook ( ) . get_current_terminal ( ) . get_window_title ( ) is None :
return
self . guake . recompute_tabs_titles ( )
|
def _get_cache_key ( self , obj ) :
"""Derive cache key for given object ."""
|
if obj is not None : # Make sure that key is REALLY unique .
return '{}-{}' . format ( id ( self ) , obj . pk )
return "{}-None" . format ( id ( self ) )
|
def get_upload_key_metadata ( self ) :
"""Generate metadata dictionary from a bucket key ."""
|
key = self . get_upload_key ( )
metadata = key . metadata . copy ( )
# Some http header properties which are stored on the key need to be
# copied to the metadata when updating
headers = { # http header name , key attribute name
'Cache-Control' : 'cache_control' , 'Content-Type' : 'content_type' , 'Content-Disposition' : 'content_disposition' , 'Content-Encoding' : 'content_encoding' , }
for header_name , attribute_name in headers . items ( ) :
attribute_value = getattr ( key , attribute_name , False )
if attribute_value :
metadata . update ( { b'{0}' . format ( header_name ) : b'{0}' . format ( attribute_value ) } )
return metadata
|
def do_fish_complete ( cli , prog_name ) :
"""Do the fish completion
Parameters
cli : click . Command
The main click Command of the program
prog _ name : str
The program name on the command line
Returns
bool
True if the completion was successful , False otherwise"""
|
commandline = os . environ [ 'COMMANDLINE' ]
args = split_args ( commandline ) [ 1 : ]
if args and not commandline . endswith ( ' ' ) :
incomplete = args [ - 1 ]
args = args [ : - 1 ]
else :
incomplete = ''
for item , help in get_choices ( cli , prog_name , args , incomplete ) :
if help :
echo ( "%s\t%s" % ( item , re . sub ( '\s' , ' ' , help ) ) )
else :
echo ( item )
return True
|
def find_existing_record ( env , zone_id , dns_name , check_key = None , check_value = None ) :
"""Check if a specific DNS record exists .
Args :
env ( str ) : Deployment environment .
zone _ id ( str ) : Route53 zone id .
dns _ name ( str ) : FQDN of application ' s dns entry to add / update .
check _ key ( str ) : Key to look for in record . Example : " Type "
check _ value ( str ) : Value to look for with check _ key . Example : " CNAME "
Returns :
json : Found Record . Returns None if no record found"""
|
client = boto3 . Session ( profile_name = env ) . client ( 'route53' )
pager = client . get_paginator ( 'list_resource_record_sets' )
existingrecord = None
for rset in pager . paginate ( HostedZoneId = zone_id ) :
for record in rset [ 'ResourceRecordSets' ] :
if check_key :
if record [ 'Name' ] . rstrip ( '.' ) == dns_name and record . get ( check_key ) == check_value :
LOG . info ( "Found existing record: %s" , record )
existingrecord = record
break
return existingrecord
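A hypothetical lookup; the profile name, hosted zone id and DNS name below are placeholders, not values from the original source:

record = find_existing_record(
    env='dev',
    zone_id='Z1EXAMPLE',              # placeholder hosted zone id
    dns_name='myapp.dev.example.com',
    check_key='Type',
    check_value='CNAME',
)
if record is None:
    print('No existing CNAME record found')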
|
def get_uniformly_controlled_rotation_matrix ( k ) :
"""Returns the matrix represented by : math : ` M _ { ij } ` in arXiv : quant - ph / 0407010.
This matrix converts the angles of : math : ` k ` - fold uniformly
controlled rotations to the angles of the efficient gate decomposition .
: param int k : number of control qubits
: return : the matrix : math : ` M _ { ij } `
: rtype : 2darray"""
|
M = np . full ( ( 2 ** k , 2 ** k ) , 2 ** - k )
for i in range ( 2 ** k ) :
g_i = i ^ ( i >> 1 )
# Gray code for i
for j in range ( 2 ** k ) :
M [ i , j ] *= ( - 1 ) ** ( bin ( j & g_i ) . count ( "1" ) )
return M
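For a single control qubit the conversion matrix reduces to a Hadamard matrix scaled by 1/2, which is easy to verify:

import numpy as np

M = get_uniformly_controlled_rotation_matrix(1)
assert np.allclose(M, np.array([[0.5, 0.5],
                                [0.5, -0.5]]))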
|
def get_vars_in_expression ( source ) :
'''Get list of variable names in a python expression .'''
|
import compiler
from compiler . ast import Node
# @ brief Internal recursive function .
# @ param node An AST parse Node .
# @ param var _ list Input list of variables .
# @ return An updated list of variables .
def get_vars_body ( node , var_list = [ ] ) :
if isinstance ( node , Node ) :
if node . __class__ . __name__ == 'Name' :
for child in node . getChildren ( ) :
if child not in var_list :
var_list . append ( child )
for child in node . getChildren ( ) :
if isinstance ( child , Node ) :
for child in node . getChildren ( ) :
var_list = get_vars_body ( child , var_list )
break
return var_list
return get_vars_body ( compiler . parse ( source ) )
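An illustrative call (Python 2 only, since the deprecated compiler module is used); names of called functions are also reported because they appear as Name nodes:

# get_vars_in_expression('a * x + sin(b)')
# -> a list containing 'a', 'x', 'sin' and 'b' (order follows the AST traversal)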
|
def Builder ( ** kw ) :
"""A factory for builder objects ."""
|
composite = None
if 'generator' in kw :
if 'action' in kw :
raise UserError ( "You must not specify both an action and a generator." )
kw [ 'action' ] = SCons . Action . CommandGeneratorAction ( kw [ 'generator' ] , { } )
del kw [ 'generator' ]
elif 'action' in kw :
source_ext_match = kw . get ( 'source_ext_match' , 1 )
if 'source_ext_match' in kw :
del kw [ 'source_ext_match' ]
if SCons . Util . is_Dict ( kw [ 'action' ] ) :
composite = DictCmdGenerator ( kw [ 'action' ] , source_ext_match )
kw [ 'action' ] = SCons . Action . CommandGeneratorAction ( composite , { } )
kw [ 'src_suffix' ] = composite . src_suffixes ( )
else :
kw [ 'action' ] = SCons . Action . Action ( kw [ 'action' ] )
if 'emitter' in kw :
emitter = kw [ 'emitter' ]
if SCons . Util . is_String ( emitter ) : # This allows users to pass in an Environment
# variable reference ( like " $ FOO " ) as an emitter .
# We will look in that Environment variable for
# a callable to use as the actual emitter .
var = SCons . Util . get_environment_var ( emitter )
if not var :
raise UserError ( "Supplied emitter '%s' does not appear to refer to an Environment variable" % emitter )
kw [ 'emitter' ] = EmitterProxy ( var )
elif SCons . Util . is_Dict ( emitter ) :
kw [ 'emitter' ] = DictEmitter ( emitter )
elif SCons . Util . is_List ( emitter ) :
kw [ 'emitter' ] = ListEmitter ( emitter )
result = BuilderBase ( ** kw )
if composite is not None :
result = CompositeBuilder ( result , composite )
return result
|
def download ( branch = None , build = True , installdir = "MalmoPlatform" ) :
"""Download Malmo from github and build ( by default ) the Minecraft Mod .
Example usage : import malmoenv . bootstrap ; malmoenv . bootstrap . download ( )
Args :
branch : optional branch to clone . TODO Default is release version .
build : build the Mod unless build arg is given as False .
installdir : the install dir name . Defaults to MalmoPlatform .
Returns :
The path for the Malmo Minecraft mod ."""
|
if branch is None :
branch = malmo_version
subprocess . check_call ( [ "git" , "clone" , "-b" , branch , "https://github.com/Microsoft/malmo.git" , installdir ] )
return setup ( build = build , installdir = installdir )
|
def get_port_channel_detail_output_lacp_admin_key ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
get_port_channel_detail = ET . Element ( "get_port_channel_detail" )
config = get_port_channel_detail
output = ET . SubElement ( get_port_channel_detail , "output" )
lacp = ET . SubElement ( output , "lacp" )
admin_key = ET . SubElement ( lacp , "admin-key" )
admin_key . text = kwargs . pop ( 'admin_key' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def _check_rows ( rows , check , in_range = True , return_test = 'any' ) :
"""Check all rows to be in / out of a certain range and provide testing on
return values based on provided conditions
Parameters
rows : pd . DataFrame
data rows
check : dict
dictionary with possible values of ' up ' , ' lo ' , and ' year '
in _ range : bool , optional
check if values are inside or outside of provided range
return _ test : str , optional
possible values :
- ' any ' : default , return scenarios where check passes for any entry
- ' all ' : test if all values match checks , if not , return empty set"""
|
valid_checks = set ( [ 'up' , 'lo' , 'year' ] )
if not set ( check . keys ( ) ) . issubset ( valid_checks ) :
msg = 'Unknown checking type: {}'
raise ValueError ( msg . format ( check . keys ( ) - valid_checks ) )
where_idx = set ( rows . index [ rows [ 'year' ] == check [ 'year' ] ] ) if 'year' in check else set ( rows . index )
rows = rows . loc [ list ( where_idx ) ]
up_op = rows [ 'value' ] . __le__ if in_range else rows [ 'value' ] . __gt__
lo_op = rows [ 'value' ] . __ge__ if in_range else rows [ 'value' ] . __lt__
check_idx = [ ]
for ( bd , op ) in [ ( 'up' , up_op ) , ( 'lo' , lo_op ) ] :
if bd in check :
check_idx . append ( set ( rows . index [ op ( check [ bd ] ) ] ) )
if return_test == 'any' :
ret = where_idx & set . union ( * check_idx )
elif return_test == 'all' :
ret = where_idx if where_idx == set . intersection ( * check_idx ) else set ( )
else :
raise ValueError ( 'Unknown return test: {}' . format ( return_test ) )
return ret
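A small sketch of the checking semantics on a toy DataFrame (hypothetical data):

import pandas as pd

rows = pd.DataFrame({'year': [2020, 2030, 2030],
                     'value': [1.0, 5.0, 12.0]})
check = {'year': 2030, 'lo': 0, 'up': 10}

# 'any' (default): keep rows that satisfy at least one of the bounds
_check_rows(rows, check)                      # -> {1, 2}
# 'all': every selected row must satisfy every bound, otherwise the empty set
_check_rows(rows, check, return_test='all')   # -> set()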
|
def handler_view ( self , request , resource_name , ids = None ) :
"""Handler for resources .
. . versionadded : : 0.5.7
Content - Type check
: return django . http . HttpResponse"""
|
signal_request . send ( sender = self , request = request )
time_start = time . time ( )
self . update_urls ( request , resource_name = resource_name , ids = ids )
resource = self . resource_map [ resource_name ]
allowed_http_methods = resource . Meta . allowed_methods
if request . method not in allowed_http_methods :
response = HttpResponseNotAllowed ( permitted_methods = allowed_http_methods )
signal_response . send ( sender = self , request = request , response = response , duration = time . time ( ) - time_start )
return response
if resource . Meta . authenticators and not ( request . method == "GET" and resource . Meta . disable_get_authentication ) :
user = resource . authenticate ( request )
if user is None or not user . is_authenticated ( ) :
response = HttpResponse ( "Not Authenticated" , status = 401 )
signal_response . send ( sender = self , request = request , response = response , duration = time . time ( ) - time_start )
return response
kwargs = dict ( request = request )
if ids is not None :
kwargs [ 'ids' ] = ids . split ( "," )
try :
if request . method == "GET" :
response = self . handler_view_get ( resource , ** kwargs )
elif request . method == "POST" :
response = self . handler_view_post ( resource , ** kwargs )
elif request . method == "PUT" :
response = self . handler_view_put ( resource , ** kwargs )
elif request . method == "DELETE" :
response = self . handler_view_delete ( resource , ** kwargs )
except JSONAPIError as e :
response = HttpResponse ( json . dumps ( { "errors" : [ e . data ] } , cls = DatetimeDecimalEncoder ) , content_type = self . CONTENT_TYPE , status = e . status )
signal_response . send ( sender = self , request = request , response = response , duration = time . time ( ) - time_start )
return response
|
def list_tar ( archive , compression , cmd , verbosity , interactive ) :
"""List a TAR archive ."""
|
cmdlist = [ cmd , '--list' ]
add_tar_opts ( cmdlist , compression , verbosity )
cmdlist . extend ( [ "--file" , archive ] )
return cmdlist
|
def __initialize_model ( self ) :
"""Initializes the Model ."""
|
LOGGER . debug ( "> Initializing model." )
self . beginResetModel ( )
self . root_node = umbra . ui . nodes . DefaultNode ( name = "InvisibleRootNode" )
self . __default_project_node = ProjectNode ( name = self . __default_project , parent = self . root_node , node_flags = int ( Qt . ItemIsEnabled ) , attributes_flags = int ( Qt . ItemIsEnabled ) )
self . enable_model_triggers ( True )
self . endResetModel ( )
|
def get_param ( self , name ) :
"""Get a WinDivert parameter . See pydivert . Param for the list of parameters .
The remapped function is WinDivertGetParam : :
BOOL WinDivertGetParam (
_ _ in HANDLE handle ,
_ _ in WINDIVERT _ PARAM param ,
_ _ out UINT64 * pValue )
For more info on the C call visit : http : / / reqrypt . org / windivert - doc . html # divert _ get _ param
: return : The parameter value ."""
|
value = c_uint64 ( 0 )
windivert_dll . WinDivertGetParam ( self . _handle , name , byref ( value ) )
return value . value
|
def mkdir ( self , path , mode = o777 ) :
"""Create a folder ( directory ) named ` ` path ` ` with numeric mode ` ` mode ` ` .
The default mode is 0777 ( octal ) . On some systems , mode is ignored .
Where it is used , the current umask value is first masked out .
: param str path : name of the folder to create
: param int mode : permissions ( posix - style ) for the newly - created folder"""
|
path = self . _adjust_cwd ( path )
self . _log ( DEBUG , "mkdir({!r}, {!r})" . format ( path , mode ) )
attr = SFTPAttributes ( )
attr . st_mode = mode
self . _request ( CMD_MKDIR , path , attr )
|
def traced_function ( func = None , name = None , on_start = None , require_active_trace = False ) :
"""A decorator that enables tracing of the wrapped function or
Tornado co - routine provided there is a parent span already established .
. . code - block : : python
@ traced _ function
def my _ function1 ( arg1 , arg2 = None ) :
: param func : decorated function or Tornado co - routine
: param name : optional name to use as the Span . operation _ name .
If not provided , func . _ _ name _ _ will be used .
: param on _ start : an optional callback to be executed once the child span
is started , but before the decorated function is called . It can be
used to set any additional tags on the span , perhaps by inspecting
the decorated function arguments . The callback must have a signature
` ( span , * args , ** kwargs ) ` , where the last two collections are the
arguments passed to the actual decorated function .
. . code - block : : python
def extract _ call _ site _ tag ( span , * args , ** kwargs ) :
if ' call _ site _ tag ' in kwargs :
span . set _ tag ( ' call _ site _ tag ' , kwargs [ ' call _ site _ tag ' ] )
@ traced _ function ( on _ start = extract _ call _ site _ tag )
@ tornado . gen . coroutine
def my _ function ( arg1 , arg2 = None , call _ site _ tag = None ) :
: param require _ active _ trace : controls what to do when there is no active
trace . If require _ active _ trace = True , then no span is created .
If require _ active _ trace = False , a new trace is started .
: return : returns a tracing decorator"""
|
if func is None :
return functools . partial ( traced_function , name = name , on_start = on_start , require_active_trace = require_active_trace )
if name :
operation_name = name
else :
operation_name = func . __name__
@ functools . wraps ( func )
def decorator ( * args , ** kwargs ) :
parent_span = get_current_span ( )
if parent_span is None and require_active_trace :
return func ( * args , ** kwargs )
span = utils . start_child_span ( operation_name = operation_name , parent = parent_span )
if callable ( on_start ) :
on_start ( span , * args , ** kwargs )
# We explicitly invoke deactivation callback for the StackContext ,
# because there are scenarios when it gets retained forever , for
# example when a Periodic Callback is scheduled lazily while in the
# scope of a tracing StackContext .
with span_in_stack_context ( span ) as deactivate_cb :
try :
res = func ( * args , ** kwargs )
# Tornado co - routines usually return futures , so we must wait
# until the future is completed , in order to accurately
# capture the function ' s execution time .
if tornado . concurrent . is_future ( res ) :
def done_callback ( future ) :
deactivate_cb ( )
exception = future . exception ( )
if exception is not None :
span . log ( event = 'exception' , payload = exception )
span . set_tag ( 'error' , 'true' )
span . finish ( )
res . add_done_callback ( done_callback )
else :
deactivate_cb ( )
span . finish ( )
return res
except Exception as e :
deactivate_cb ( )
span . log ( event = 'exception' , payload = e )
span . set_tag ( 'error' , 'true' )
span . finish ( )
raise
return decorator
|
def uncomment ( name , regex , char = '#' , backup = '.bak' ) :
'''Uncomment specified commented lines in a file
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented .
This regex should not include the comment character . A leading ` ` ^ ` `
character will be stripped for convenience ( for easily switching
between comment ( ) and uncomment ( ) ) . The regex will be searched for
from the beginning of the line , ignoring leading spaces ( we prepend
` ` ^ [ \t ] * ` ` )
char : ` ` # ` `
The character to remove in order to uncomment a line
backup : ` ` . bak ` `
The file will be backed up before edit with this file extension ;
. . warning : :
This backup will be overwritten each time ` ` sed ` ` / ` ` comment ` ` /
` ` uncomment ` ` is called . Meaning the backup will only be useful
after the first invocation .
Set to False / None to not keep a backup .
Usage :
. . code - block : : yaml
/ etc / adduser . conf :
file . uncomment :
- regex : EXTRA _ GROUPS
. . versionadded : : 0.9.5'''
|
name = os . path . expanduser ( name )
ret = { 'name' : name , 'changes' : { } , 'result' : False , 'comment' : '' }
if not name :
return _error ( ret , 'Must provide name to file.uncomment' )
check_res , check_msg = _check_file ( name )
if not check_res :
return _error ( ret , check_msg )
# Make sure the pattern appears in the file
if __salt__ [ 'file.search' ] ( name , '{0}[ \t]*{1}' . format ( char , regex . lstrip ( '^' ) ) , multiline = True ) : # Line exists and is commented
pass
elif __salt__ [ 'file.search' ] ( name , '^[ \t]*{0}' . format ( regex . lstrip ( '^' ) ) , multiline = True ) :
ret [ 'comment' ] = 'Pattern already uncommented'
ret [ 'result' ] = True
return ret
else :
return _error ( ret , '{0}: Pattern not found' . format ( regex ) )
if __opts__ [ 'test' ] :
ret [ 'changes' ] [ name ] = 'updated'
ret [ 'comment' ] = 'File {0} is set to be updated' . format ( name )
ret [ 'result' ] = None
return ret
with salt . utils . files . fopen ( name , 'rb' ) as fp_ :
slines = salt . utils . data . decode ( fp_ . readlines ( ) )
# Perform the edit
__salt__ [ 'file.comment_line' ] ( name , regex , char , False , backup )
with salt . utils . files . fopen ( name , 'rb' ) as fp_ :
nlines = salt . utils . data . decode ( fp_ . readlines ( ) )
# Check the result
ret [ 'result' ] = __salt__ [ 'file.search' ] ( name , '^[ \t]*{0}' . format ( regex . lstrip ( '^' ) ) , multiline = True )
if slines != nlines :
if not __utils__ [ 'files.is_text' ] ( name ) :
ret [ 'changes' ] [ 'diff' ] = 'Replace binary file'
else : # Changes happened , add them
ret [ 'changes' ] [ 'diff' ] = ( '' . join ( difflib . unified_diff ( slines , nlines ) ) )
if ret [ 'result' ] :
ret [ 'comment' ] = 'Uncommented lines successfully'
else :
ret [ 'comment' ] = 'Expected uncommented lines not found'
return ret
|
def split_scoped_hparams ( scopes , merged_hparams ) :
"""Split single HParams with scoped keys into multiple ."""
|
split_values = { scope : { } for scope in scopes }
merged_values = merged_hparams . values ( )
for scoped_key , value in six . iteritems ( merged_values ) :
scope = scoped_key . split ( "." ) [ 0 ]
key = scoped_key [ len ( scope ) + 1 : ]
split_values [ scope ] [ key ] = value
return [ hparam . HParams ( ** split_values [ scope ] ) for scope in scopes ]
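A usage sketch, assuming hparam . HParams (as in tensor2tensor) accepts dotted keys when constructed from a dict:

merged = hparam.HParams(**{
    "agent.learning_rate": 1e-4,
    "agent.gamma": 0.99,
    "env.frame_skip": 4,
})
agent_hp, env_hp = split_scoped_hparams(["agent", "env"], merged)
# agent_hp.learning_rate == 1e-4, agent_hp.gamma == 0.99
# env_hp.frame_skip == 4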
|
def _get_object_as_soft ( self ) :
"""Get the object as SOFT formated string ."""
|
soft = [ "^%s = %s" % ( self . geotype , self . name ) , self . _get_metadata_as_string ( ) , self . _get_columns_as_string ( ) , self . _get_table_as_string ( ) ]
return "\n" . join ( soft )
|
def construct ( self , response_args , request , ** kwargs ) :
"""Construct the response
: param response _ args : response arguments
: param request : The parsed request , a self . request _ cls class instance
: param kwargs : Extra keyword arguments
: return : An instance of the self . response _ cls class"""
|
response_args = self . do_pre_construct ( response_args , request , ** kwargs )
# logger . debug ( " kwargs : % s " % sanitize ( kwargs ) )
response = self . response_cls ( ** response_args )
return self . do_post_construct ( response , request , ** kwargs )
|
def extract_row ( self , row ) :
"""get row number ' row '"""
|
new_row = [ ]
for col in range ( self . get_grid_width ( ) ) :
new_row . append ( self . get_tile ( row , col ) )
return new_row
|