signature | implementation
---|---
def infos(self, type=None, failed=False):
    """Get infos that originate from this node.

    Type must be a subclass of :class:`~dallinger.models.Info`; the default is
    ``Info``. Failed can be True, False or "all".
    """
|
if type is None:
    type = Info
if not issubclass(type, Info):
    raise TypeError("Cannot get infos of type {} "
                    "as it is not a valid type.".format(type))
if failed not in ["all", False, True]:
    raise ValueError("{} is not a valid value for failed".format(failed))
if failed == "all":
    return type.query.filter_by(origin_id=self.id).all()
else:
    return type.query.filter_by(origin_id=self.id, failed=failed).all()
|
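A minimal usage sketch for the method above, assuming a Dallinger `Node` instance named `node` and a custom `Info` subclass named `Meme`; both names are hypothetical here.

# Hypothetical usage; assumes `node` is a dallinger.models.Node and
# `Meme` is a subclass of dallinger.models.Info defined elsewhere.
all_infos = node.infos()                          # non-failed Infos of any type
memes = node.infos(type=Meme)                     # non-failed Memes only
everything = node.infos(type=Meme, failed="all")  # include failed Memes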
def _get_all_cwlkeys(items, default_keys=None):
    """Retrieve cwlkeys from inputs, handling defaults which can be null.

    When inputs are null in some and present in others, this creates unequal
    keys in each sample, confusing decision making about which are primary and extras.
    """
|
if default_keys:
    default_keys = set(default_keys)
else:
    default_keys = set(["metadata__batch",
                        "config__algorithm__validate",
                        "config__algorithm__validate_regions",
                        "config__algorithm__validate_regions_merged",
                        "config__algorithm__variant_regions",
                        "validate__summary",
                        "validate__tp", "validate__fp", "validate__fn",
                        "config__algorithm__coverage",
                        "config__algorithm__coverage_merged",
                        "genome_resources__variation__cosmic",
                        "genome_resources__variation__dbsnp",
                        "genome_resources__variation__clinvar"])
all_keys = set([])
for data in items:
    all_keys.update(set(data["cwl_keys"]))
all_keys.update(default_keys)
return all_keys
|
def load_mldataset(filename):
    """Not particularly fast code to parse the text file, load it into three
    NDArrays, and produce a ``gluon.data.ArrayDataset``."""
|
user = []
item = []
score = []
with open(filename) as f:
    for line in f:
        tks = line.strip().split('\t')
        if len(tks) != 4:
            continue
        user.append(int(tks[0]))
        item.append(int(tks[1]))
        score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return gluon.data.ArrayDataset(user, item, score)
|
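A short usage sketch, assuming MXNet/Gluon are available as implied by the snippet above; the ratings file path is a made-up example.

import mxnet as mx
from mxnet import gluon

dataset = load_mldataset('./ml-100k/u.data')   # hypothetical tab-separated ratings file
loader = gluon.data.DataLoader(dataset, batch_size=64, shuffle=True)
for user_batch, item_batch, score_batch in loader:
    pass  # feed the three NDArray batches to a model here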
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pi_V conditioned on J.

    This function returns the Pi array from the model factors of the V genomic
    contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation.
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.
    self.cutV_genomic_CDR3_segs : list of strings
        List of all the V genomic nucleotide sequences trimmed to begin at the
        conserved C residue and with the maximum number of palindromic
        insertions appended.
    self.PVdelV_nt_pos_vec : list of ndarrays
        For each V allele, format P(delV|V) into the correct form for a Pi
        array or V(J)_{x_1}. This is only done for the first and last
        position in each codon.
    self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
        For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
        positions in the middle of a codon into the correct form for a Pi
        array or V(J)_{x_1} given the 'amino acid'.
    self.PVJ : ndarray
        Joint probability distribution of V and J, P(V, J).

    Returns
    -------
    Pi_V_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
    max_V_align : int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask.
    """
|
# Note: the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus
# are max_palindrome nts longer than the template. Furthermore, the genomic
# sequence should be pruned to start at the conserved C.
Pi_V_given_J = [np.zeros((4, len(CDR3_seq) * 3)) for i in J_usage_mask]
# Holds the aggregate weight for each nt possibility and position
alignment_lengths = []
for V_in in V_usage_mask:
    try:
        cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
    except IndexError:
        print('Check provided V usage mask. Contains indices out of allowed range.')
        continue
    current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
    alignment_lengths += [current_alignment_length]
    current_Pi_V = np.zeros((4, len(CDR3_seq) * 3))
    if current_alignment_length > 0:
        # For the first and last nt in a codon use PVdelV_nt_pos_vec
        current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
        for pos in range(1, current_alignment_length, 3):
            # For the middle nt use PVdelV_2nd_nt_pos_per_aa_vec
            current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos // 3]][:, pos]
        for j, J_in in enumerate(J_usage_mask):
            Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in] * current_Pi_V[:, :current_alignment_length]
return Pi_V_given_J, max(alignment_lengths)
|
def write(self, fileobj=sys.stdout, indent=u""):
    """Recursively write an element and its children to a file."""
|
fileobj.write(self.start_tag(indent))
fileobj.write(u"\n")
|
def disable(cls, args):
    """Disable subcommand."""
|
mgr = NAppsManager()
if args['all']:
    napps = mgr.get_enabled()
else:
    napps = args['<napp>']
for napp in napps:
    mgr.set_napp(*napp)
    LOG.info('NApp %s:', mgr.napp_id)
    cls.disable_napp(mgr)
|
def matchup(self):
    """Return the game meta information displayed in report banners including team names,
    final score, game date, location, and attendance. Data format is

    .. code:: python

        'home': home,
        'away': away,
        'final': final,
        'attendance': att,
        'date': date,
        'location': loc

    :returns: matchup banner info
    :rtype: dict
    """
|
if self.play_by_play.matchup:
    return self.play_by_play.matchup
elif self.rosters.matchup:
    return self.rosters.matchup
elif self.toi.matchup:
    return self.toi.matchup
else:
    return self.face_off_comp.matchup
|
def parse_kv_args(self, args):
    """Parse key-value style arguments."""
|
for arg in ["start", "end", "count", "stride"]:
    try:
        arg_raw = args.pop(arg, None)
        if arg_raw is None:
            continue
        arg_cooked = int(arg_raw, 0)
        setattr(self, arg, arg_cooked)
    except ValueError:
        raise AnsibleError("can't parse arg %s=%r as integer" % (arg, arg_raw))
if 'format' in args:
    self.format = args.pop("format")
if args:
    raise AnsibleError("unrecognized arguments to with_sequence: %r" % args.keys())
|
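A small standalone sketch of the `int(raw, 0)` parsing used above, which accepts decimal, hex, and octal strings; it is illustrative only and not Ansible-specific.

# int(x, 0) auto-detects the base from the prefix, mirroring the lookup above.
for raw in ("10", "0x10", "0o10"):
    print(raw, "->", int(raw, 0))   # 10 -> 10, 0x10 -> 16, 0o10 -> 8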
def get_desc2nts_fnc ( self , hdrgo_prt = True , section_prt = None , top_n = None , use_sections = True ) :
"""Return grouped , sorted namedtuples in either format : flat , sections ."""
|
# RETURN : flat list of namedtuples
nts_flat = self . get_nts_flat ( hdrgo_prt , use_sections )
if nts_flat :
flds = nts_flat [ 0 ] . _fields
if not use_sections :
return { 'sortobj' : self , 'flat' : nts_flat , 'hdrgo_prt' : hdrgo_prt , 'flds' : flds , 'num_items' : len ( nts_flat ) , 'num_sections' : 1 }
else :
return { 'sortobj' : self , 'sections' : [ ( self . grprobj . hdrobj . secdflt , nts_flat ) ] , 'hdrgo_prt' : hdrgo_prt , 'flds' : flds , 'num_items' : len ( nts_flat ) , 'num_sections' : 1 }
# print ( ' FFFF Sorter : get _ desc2nts _ fnc : nts _ flat is None ' )
# RETURN : 2 - D list [ ( section _ name0 , namedtuples0 ) , ( section _ name1 , namedtuples1 ) , . . .
# kws : top _ n hdrgo _ prt section _ sortby
# Over - ride hdrgo _ prt depending on top _ n value
assert top_n is not True and top_n is not False , "top_n({T}) MUST BE None OR AN int" . format ( T = top_n )
assert self . sectobj is not None , "SECTIONS OBJECT DOES NOT EXIST"
sec_sb = self . sectobj . section_sortby
# Override hdrgo _ prt , if sorting by sections or returning a subset of GO IDs in section
hdrgo_prt_curr = hdrgo_prt is True
if sec_sb is True or ( sec_sb is not False and sec_sb is not None ) or top_n is not None :
hdrgo_prt_curr = False
# print ( ' GGGG Sorter : get _ desc2nts _ fnc : hdrgo _ prt _ curr ( { } ) sec _ sb ( { } ) top _ n ( { } ) ' . format (
# hdrgo _ prt _ curr , sec _ sb , top _ n ) )
nts_section = self . sectobj . get_sorted_nts_keep_section ( hdrgo_prt_curr )
# print ( ' HHHH Sorter : get _ desc2nts _ fnc : nts _ section ' )
# Take top _ n in each section , if requested
if top_n is not None :
nts_section = [ ( s , nts [ : top_n ] ) for s , nts in nts_section ]
if section_prt is None :
nts_flat = self . get_sections_flattened ( nts_section )
flds = nts_flat [ 0 ] . _fields if nts_flat else [ ]
return { 'sortobj' : self , 'flat' : nts_flat , 'hdrgo_prt' : hdrgo_prt_curr , 'flds' : flds , 'num_items' : len ( nts_flat ) , 'num_sections' : 1 }
# Send flat list of sections nts back , as requested
if section_prt is False :
nts_flat = self . get_sections_flattened ( nts_section )
flds = nts_flat [ 0 ] . _fields if nts_flat else [ ]
return { 'sortobj' : self , 'flat' : nts_flat , 'hdrgo_prt' : hdrgo_prt_curr , 'flds' : flds , 'num_items' : len ( nts_flat ) , 'num_sections' : len ( nts_section ) }
# Send 2 - D sections nts back
# print ( ' IIII Sorter : get _ desc2nts _ fnc : nts _ section ' )
flds = nts_section [ 0 ] [ 1 ] [ 0 ] . _fields if nts_section else [ ]
return { 'sortobj' : self , 'sections' : nts_section , 'hdrgo_prt' : hdrgo_prt_curr , 'flds' : flds , 'num_items' : sum ( len ( nts ) for _ , nts in nts_section ) , 'num_sections' : len ( nts_section ) }
|
def call_plugins(self, step):
    '''For each plugin, check if a "step" method exists on it, and call it.

    Args:
        step (str): The method to search for and call on each plugin.
    '''
|
for plugin in self.plugins:
    try:
        getattr(plugin, step)()
    except AttributeError:
        self.logger.debug("{} doesn't exist on plugin {}".format(step, plugin))
    except TypeError:
        self.logger.debug("{} on plugin {} is not callable".format(step, plugin))
|
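A minimal sketch of a plugin object that would satisfy the dispatcher above; the class and step names are hypothetical.

class PrintPlugin:
    """Hypothetical plugin exposing a 'setup' step."""
    def setup(self):
        print("setup step called")

# call_plugins('setup') would invoke PrintPlugin().setup() for each registered
# plugin and quietly log, rather than crash on, plugins that lack the method.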
def route_tables_list_all(**kwargs):
    '''
    .. versionadded:: 2019.2.0

    List all route tables within a subscription.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.route_tables_list_all
    '''
|
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
    tables = __utils__['azurearm.paged_object_to_list'](netconn.route_tables.list_all())
    for table in tables:
        result[table['name']] = table
except CloudError as exc:
    __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
    result = {'error': str(exc)}
return result
|
def IsRunning ( self ) :
"""Returns True if there ' s a currently running iteration of this job ."""
|
current_urn = self . Get ( self . Schema . CURRENT_FLOW_URN )
if not current_urn :
return False
try :
current_flow = aff4 . FACTORY . Open ( urn = current_urn , aff4_type = flow . GRRFlow , token = self . token , mode = "r" )
except aff4 . InstantiationError : # This isn ' t a flow , something went really wrong , clear it out .
logging . error ( "Unable to open cron job run: %s" , current_urn )
self . DeleteAttribute ( self . Schema . CURRENT_FLOW_URN )
self . Flush ( )
return False
return current_flow . GetRunner ( ) . IsRunning ( )
|
def search_tags ( self , tag_name ) :
"""Searches for tags
: param tag _ name : Partial tag name to get autocomplete suggestions for"""
|
response = self . _req ( '/browse/tags/search' , { "tag_name" : tag_name } )
tags = list ( )
for item in response [ 'results' ] :
tags . append ( item [ 'tag_name' ] )
return tags
|
def _get_rename_function(mapper):
    """Returns a function that will map names/labels, depending on whether
    mapper is a dict, Series or just a function."""
|
if isinstance(mapper, (abc.Mapping, ABCSeries)):
    def f(x):
        if x in mapper:
            return mapper[x]
        else:
            return x
else:
    f = mapper
return f
|
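A brief usage sketch, under the assumption that `_get_rename_function` is importable; the sample mapping is made up.

# With a dict mapper, unknown keys pass through unchanged.
rename = _get_rename_function({"a": "alpha"})
print(rename("a"), rename("b"))   # alpha b

# With a plain callable, the callable itself is returned.
rename = _get_rename_function(str.upper)
print(rename("a"))                # A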
def start ( self , program , start = None , stop = None , resolution = None , max_delay = None ) :
"""Start executing the given SignalFlow program without being attached
to the output of the computation ."""
|
params = self . _get_params ( start = start , stop = stop , resolution = resolution , maxDelay = max_delay )
self . _transport . start ( program , params )
|
def ascolumn(x, dtype=None):
    '''Convert ``x`` into a ``column``-type ``numpy.ndarray``.'''
|
x = asarray(x, dtype)
return x if len(x.shape) >= 2 else x.reshape(len(x), 1)
|
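A quick sketch of the intended shape change, assuming `asarray` is NumPy's (as the surrounding helpers suggest).

import numpy as np

v = np.array([1, 2, 3])        # shape (3,)
col = v.reshape(len(v), 1)     # shape (3, 1) -- what ascolumn returns for 1-D input
print(col.shape)               # (3, 1)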
async def put ( self , tube , data , ttl = None , ttr = None , delay = None ) :
"""Enqueue a task .
Returns a ` Task ` object ."""
|
cmd = tube . cmd ( "put" )
args = ( data , )
params = dict ( )
if ttr is not None :
params [ "ttr" ] = ttr
if ttl is not None :
params [ "ttl" ] = ttl
if delay is not None :
params [ "delay" ] = delay
if params :
args += ( params , )
res = await self . tnt . call ( cmd , args )
return res
|
def parseMemory(memAttribute):
    """Returns EC2 'memory' string as a float.

    Format should always be '#' GiB (example: '244 GiB' or '1,952 GiB').
    Amazon loves to put commas in their numbers, so we have to accommodate that.
    If the syntax ever changes, this will raise.

    :param memAttribute: EC2 JSON memory param string.
    :return: A float representing memory in GiB.
    """
|
mem = memAttribute.replace(',', '').split()
if mem[1] == 'GiB':
    return float(mem[0])
else:
    raise RuntimeError('EC2 JSON format has likely changed. Error parsing memory.')
|
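A doctest-style sketch of the comma handling described above, assuming `parseMemory` is in scope.

>>> parseMemory('244 GiB')
244.0
>>> parseMemory('1,952 GiB')   # comma is stripped before float()
1952.0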
def describe ( self , bucket , descriptor = None ) :
"""https : / / github . com / frictionlessdata / tableschema - bigquery - py # storage"""
|
# Set descriptor
if descriptor is not None :
self . __descriptors [ bucket ] = descriptor
# Get descriptor
else :
descriptor = self . __descriptors . get ( bucket )
if descriptor is None :
table_name = self . __mapper . convert_bucket ( bucket )
response = self . __service . tables ( ) . get ( projectId = self . __project , datasetId = self . __dataset , tableId = table_name ) . execute ( )
converted_descriptor = response [ 'schema' ]
descriptor = self . __mapper . restore_descriptor ( converted_descriptor )
return descriptor
|
def _read_json_db ( self ) :
"""read metadata from a json string stored in a DB .
: return : the parsed json dict
: rtype : dict"""
|
try :
metadata_str = self . db_io . read_metadata_from_uri ( self . layer_uri , 'json' )
except HashNotFoundError :
return { }
try :
metadata = json . loads ( metadata_str )
return metadata
except ValueError :
message = tr ( 'the file DB entry for %s does not appear to be ' 'valid JSON' )
message %= self . layer_uri
raise MetadataReadError ( message )
|
def is_sms_service_for_region(numobj, region_dialing_from):
    """Given a valid short number, determines whether it is an SMS service
    (however, nothing is implied about its validity). An SMS service is where
    the primary or only intended usage is to receive and/or send text messages
    (SMSs). This includes MMS as MMS numbers downgrade to SMS if the other
    party isn't MMS-capable. If it is important that the number is valid, then
    its validity must first be checked using is_valid_short_number or
    is_valid_short_number_for_region. Returns False if the number doesn't
    match the region provided.

    Arguments:
    numobj -- the valid short number to check
    region_dialing_from -- the region from which the number is dialed

    Returns whether the short number is an SMS service in the provided region,
    assuming the input was a valid short number.
    """
|
if not _region_dialing_from_matches_number(numobj, region_dialing_from):
    return False
metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
return (metadata is not None and
        _matches_possible_number_and_national_number(
            national_significant_number(numobj), metadata.sms_services))
|
def hold_absent(name, snapshot, recursive=False):
    '''
    ensure hold is absent on the system

    name : string
        name of hold
    snapshot : string
        name of snapshot
    recursive : boolean
        recursively releases a hold with the given tag on the snapshots of all descendant file systems.
    '''
|
ret = { 'name' : name , 'changes' : { } , 'result' : True , 'comment' : '' }
# # log configuration
log . debug ( 'zfs.hold_absent::%s::config::snapshot = %s' , name , snapshot )
log . debug ( 'zfs.hold_absent::%s::config::recursive = %s' , name , recursive )
# # check we have a snapshot / tag name
if not __utils__ [ 'zfs.is_snapshot' ] ( snapshot ) :
ret [ 'result' ] = False
ret [ 'comment' ] = 'invalid snapshot name: {0}' . format ( snapshot )
return ret
if __utils__ [ 'zfs.is_snapshot' ] ( name ) or __utils__ [ 'zfs.is_bookmark' ] ( name ) or name == 'error' :
ret [ 'result' ] = False
ret [ 'comment' ] = 'invalid tag name: {0}' . format ( name )
return ret
# # release hold if required
holds = __salt__ [ 'zfs.holds' ] ( snapshot )
if name in holds : # # NOTE : hold found for snapshot , release it
if not __opts__ [ 'test' ] :
mod_res = __salt__ [ 'zfs.release' ] ( name , snapshot , ** { 'recursive' : recursive } )
else :
mod_res = OrderedDict ( [ ( 'released' , True ) ] )
ret [ 'result' ] = mod_res [ 'released' ]
if ret [ 'result' ] :
ret [ 'changes' ] = { snapshot : { name : 'released' } }
ret [ 'comment' ] = 'hold {0} released' . format ( name , )
else :
ret [ 'comment' ] = 'failed to release hold {0}' . format ( name , )
if 'error' in mod_res :
ret [ 'comment' ] = mod_res [ 'error' ]
elif 'error' in holds : # # NOTE : we have an error
ret [ 'result' ] = False
ret [ 'comment' ] = holds [ 'error' ]
else : # # NOTE : no hold found with name for snapshot
ret [ 'comment' ] = 'hold {0} is absent' . format ( name , )
return ret
|
def upload_to_s3 ( self , key , filename ) :
"""Set the content type and gzip headers if applicable
and upload the item to S3"""
|
extra_args = { 'ACL' : self . acl }
# determine the mimetype of the file
guess = mimetypes . guess_type ( filename )
content_type = guess [ 0 ]
encoding = guess [ 1 ]
if content_type :
extra_args [ 'ContentType' ] = content_type
# add the gzip headers , if necessary
if ( self . gzip and content_type in self . gzip_content_types ) or encoding == 'gzip' :
extra_args [ 'ContentEncoding' ] = 'gzip'
# add the cache - control headers if necessary
if content_type in self . cache_control :
extra_args [ 'CacheControl' ] = '' . join ( ( 'max-age=' , str ( self . cache_control [ content_type ] ) ) )
# access and write the contents from the file
if not self . dry_run :
logger . debug ( "Uploading %s" % filename )
if self . verbosity > 0 :
self . stdout . write ( "Uploading %s" % filename )
s3_obj = self . s3_resource . Object ( self . aws_bucket_name , key )
s3_obj . upload_file ( filename , ExtraArgs = extra_args )
# Update counts
self . uploaded_files += 1
self . uploaded_file_list . append ( filename )
|
def stop ( cls , name ) :
"""stops the timer with a given name .
: param name : the name of the timer
: type name : string"""
|
cls . timer_end [ name ] = time . time ( )
if cls . debug :
print ( "Timer" , name , "stopped ..." )
|
def run ( self , test = False ) :
'''The main entry point for a plugin .'''
|
self . _request = self . _parse_request ( )
log . debug ( 'Handling incoming request for %s' , self . request . path )
items = self . _dispatch ( self . request . path )
# Close any open storages which will persist them to disk
if hasattr ( self , '_unsynced_storages' ) :
for storage in self . _unsynced_storages . values ( ) :
log . debug ( 'Saving a %s storage to disk at "%s"' , storage . file_format , storage . filename )
storage . close ( )
return items
|
def write_idx(self, idx, buf):
    """Inserts input record at given index.

    Examples
    --------
    >>> for i in range(5):
    ...     record.write_idx(i, 'record_%d' % i)
    >>> record.close()

    Parameters
    ----------
    idx : int
        Index of a file.
    buf :
        Record to write.
    """
|
key = self.key_type(idx)
pos = self.tell()
self.write(buf)
self.fidx.write('%s\t%d\n' % (str(key), pos))
self.idx[key] = pos
self.keys.append(key)
|
def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
    r"""Return a file object wrapping a stream to journal.

    Log messages written to this file as simple newline separated text strings
    are written to the journal.

    The file will be line buffered, so messages are actually sent after a
    newline character is written.

    >>> from systemd import journal
    >>> stream = journal.stream('myapp')       # doctest: +SKIP
    >>> res = stream.write('message...\n')     # doctest: +SKIP

    will produce the following message in the journal::

        PRIORITY=7
        SYSLOG_IDENTIFIER=myapp
        MESSAGE=message...

    If identifier is None, a suitable default based on sys.argv[0] will be used.

    This interface can be used conveniently with the print function:

    >>> from __future__ import print_function
    >>> stream = journal.stream()              # doctest: +SKIP
    >>> print('message...', file=stream)       # doctest: +SKIP

    priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
    `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.

    level_prefix is a boolean. If true, kernel-style log priority level prefixes
    (such as '<1>') are interpreted. See sd-daemon(3) for more information.
    """
|
if identifier is None:
    if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c':
        identifier = 'python'
    else:
        identifier = _sys.argv[0]
fd = stream_fd(identifier, priority, level_prefix)
return _os.fdopen(fd, 'w', 1)
|
def wait_until_element_is_visible ( self , locator , timeout = None , error = None ) :
"""Waits until element specified with ` locator ` is visible .
Fails if ` timeout ` expires before the element is visible . See
` introduction ` for more information about ` timeout ` and its
default value .
` error ` can be used to override the default error message .
See also ` Wait Until Page Contains ` , ` Wait Until Page Contains
Element ` , ` Wait For Condition ` and BuiltIn keyword ` Wait Until Keyword
Succeeds ` ."""
|
def check_visibility ( ) :
visible = self . _is_visible ( locator )
if visible :
return
elif visible is None :
return error or "Element locator '%s' did not match any elements after %s" % ( locator , self . _format_timeout ( timeout ) )
else :
return error or "Element '%s' was not visible in %s" % ( locator , self . _format_timeout ( timeout ) )
self . _wait_until_no_error ( timeout , check_visibility )
|
def masked_pick_probability(x, y, temp, cos_distance):
    """The pairwise sampling probabilities for the elements of x for neighbor
    points which share labels.

    :param x: a matrix
    :param y: a list of labels for each element of x
    :param temp: Temperature
    :param cos_distance: Boolean for using cosine or Euclidean distance
    :returns: A tensor for the pairwise sampling probabilities.
    """
|
return SNNLCrossEntropy.pick_probability(x, temp, cos_distance) * SNNLCrossEntropy.same_label_mask(y, y)
|
def __do_init(self, dat_dict):
    """Update the storage file from a dictionary.

    :param dat_dict:
    :type dat_dict:
    :return:
    :rtype:
    """
|
for k, v in dat_dict.items():
    if isinstance(v, dict):
        self.cfg.init(k, v['val'], v['proto'])
    else:
        self.cfg.init(k, v)
self.cfg.sync()
|
def rows_sum_init ( hdf5_file , path , out_lock , * numpy_args ) :
"""Create global variables sharing the same object as the one pointed by
' hdf5 _ file ' , ' path ' and ' out _ lock ' .
Also Create a NumPy array copy of a multiprocessing . Array ctypes array
specified by ' * numpy _ args ' ."""
|
global g_hdf5_file , g_path , g_out , g_out_lock
g_hdf5_file , g_path , g_out_lock = hdf5_file , path , out_lock
g_out = to_numpy_array ( * numpy_args )
|
def find_by_name ( collection , name , exact = True ) :
"""Searches collection by resource name .
: param rightscale . ResourceCollection collection : The collection in which to
look for : attr : ` name ` .
: param str name : The name to look for in collection .
: param bool exact : A RightScale ` ` index ` ` search with a : attr : ` name ` filter
can return multiple results because it does a substring match on
resource names . So any resource that contains the specified name will
be returned . The : attr : ` exact ` flag controls whether to attempt to
find an exact match for the given name . If : attr : ` exact ` is ` ` False ` ` ,
this will return a list of all the matches . The default behaviour is
to perform an exact match and return a single result .
Returns ` ` None ` ` if no resources are found with a matching name ."""
|
params = { 'filter[]' : [ 'name==%s' % name ] }
found = collection . index ( params = params )
if not exact and len ( found ) > 0 :
return found
for f in found :
if f . soul [ 'name' ] == name :
return f
|
def _create_component(tag_name, allow_children=True, callbacks=[]):
    """Create a component for an HTML Tag

    Examples:
        >>> marquee = _create_component('marquee')
        >>> marquee('woohoo')
        <marquee>woohoo</marquee>
    """
|
def _component ( * children , ** kwargs ) :
if 'children' in kwargs :
children = kwargs . pop ( 'children' )
else : # Flatten children under specific circumstances
# This supports the use case of div ( [ a , b , c ] )
# And allows users to skip the * operator
if len ( children ) == 1 and isinstance ( children [ 0 ] , list ) : # We want children to be tuples and not lists , so
# they can be immutable
children = tuple ( children [ 0 ] )
if 'style' in kwargs :
style = kwargs . pop ( 'style' )
else :
style = None
if 'attributes' in kwargs :
attributes = kwargs [ 'attributes' ]
else :
attributes = dict ( ** kwargs )
if ( tag_name == 'a' ) and ( 'href' not in attributes ) :
attributes [ 'href' ] = '#'
if not allow_children and children : # We don ' t allow children , but some were passed in
raise ValueError ( '<{tag_name} /> cannot have children' . format ( tag_name = tag_name ) )
for cb in callbacks :
cbname = cb [ 'name' ]
if cbname in attributes :
if attributes [ cbname ] is not None :
from google . colab import output as colab_output
callback_id = cbname + 'callback-' + str ( uuid . uuid4 ( ) )
register_callback ( callback_id , attributes [ cbname ] )
# js = " google . colab . kernel . invokeFunction ( ' { callback _ id } ' , [ ] , { kwargs } ) "
js = "window.vdomr_invokeFunction('{callback_id}', [], {kwargs})"
js = js . replace ( '{callback_id}' , callback_id )
js = js . replace ( '{kwargs}' , cb [ 'kwargs' ] )
attributes [ cbname ] = js
else :
attributes [ cbname ] = ''
v = VDOM ( tag_name , attributes , style , children )
return v
return _component
|
def _execute_commands ( self , commands , fails = False ) :
"""Execute commands and get list of failed commands and count of successful commands"""
|
# Confirm that prepare _ statements flag is on
if self . _prep_statements :
prepared_commands = [ prepare_sql ( c ) for c in tqdm ( commands , total = len ( commands ) , desc = 'Prepping SQL Commands' ) ]
print ( '\tCommands prepared' , len ( prepared_commands ) )
else :
prepared_commands = commands
desc = 'Executing SQL Commands' if not fails else 'Executing Failed SQL Commands'
fail , success = [ ] , 0
for command in tqdm ( prepared_commands , total = len ( prepared_commands ) , desc = desc ) : # Attempt to execute command and skip command if error is raised
try :
self . _MySQL . executemore ( command )
success += 1
except :
fail . append ( command )
self . _MySQL . _commit ( )
return fail , success
|
def cost ( self ) :
"""Get the approximate cost of this filter .
Cost is the total cost of the exclusion rules in this filter . The cost
of family - specific filters is divided by 10.
Returns :
float : The approximate cost of the filter ."""
|
total = 0.0
for family , rules in self . _excludes . iteritems ( ) :
cost = sum ( x . cost ( ) for x in rules )
if family :
cost = cost / float ( 10 )
total += cost
return total
|
def is_readable(path):
    """Returns if given path is readable.

    :param path: Path to check access.
    :type path: unicode
    :return: Is path readable.
    :rtype: bool
    """
|
if os.access(path, os.R_OK):
    LOGGER.debug("> '{0}' path is readable.".format(path))
    return True
else:
    LOGGER.debug("> '{0}' path is not readable.".format(path))
    return False
|
def get_attribute ( self , element , attribute , convert_type = True ) :
""": Description : Return the given attribute of the target element .
: param element : Element for browser instance to target .
: type element : WebElement
: param attribute : Attribute of target element to return .
: type attribute : string
: param convert _ type : If enabled , will return pythonic type .
: type convert _ type : bool
: return : None , bool , int , float , string"""
|
attribute = self . browser . execute_script ( 'return arguments[0].getAttribute("%s");' % attribute , element )
return self . __type2python ( attribute ) if convert_type else attribute
|
def enable_contact_host_notifications ( self , contact ) :
"""Enable host notifications for a contact
Format of the line that triggers function call : :
ENABLE _ CONTACT _ HOST _ NOTIFICATIONS ; < contact _ name >
: param contact : contact to enable
: type contact : alignak . objects . contact . Contact
: return : None"""
|
if not contact . host_notifications_enabled :
contact . modified_attributes |= DICT_MODATTR [ "MODATTR_NOTIFICATIONS_ENABLED" ] . value
contact . host_notifications_enabled = True
self . send_an_element ( contact . get_update_status_brok ( ) )
|
def add_property(self, set_property, name, starting_value, tag_name=None):
    """Set properties of attributes stored in content using stored common fdel and fget and the given fset.

    Args:
        set_property -- Function that sets the given property.
        name -- Name of the attribute this property must simulate. Used as key in content dict by default.
        starting_value -- Starting value of the given property.

    Keyword args:
        tag_name -- The tag name stored in content dict as a key if different to name.
    """
|
def del_property(self, tag_name):
    try:
        del self._content[tag_name]
    except KeyError:
        pass

def get_property(self, tag_name):
    try:
        return self._content[tag_name]
    except KeyError:
        return None

tag_name = (name if tag_name is None else tag_name)
fget = lambda self: get_property(self, tag_name)
fdel = lambda self: del_property(self, tag_name)
fset = lambda self, value: set_property(value)
setattr(self.__class__, name, property(fget, fset, fdel))
set_property(starting_value)
|
def get_configuration ( ) :
"""Combine defaults with the Django configuration ."""
|
configuration = { "get_object_function" : None , "hcard_path" : "/hcard/users/" , "nodeinfo2_function" : None , "process_payload_function" : None , "search_path" : None , # TODO remove or default to True once AP support is more ready
"activitypub" : False , }
configuration . update ( settings . FEDERATION )
if not all ( [ "get_private_key_function" in configuration , "get_profile_function" in configuration , "base_url" in configuration , ] ) :
raise ImproperlyConfigured ( "Missing required FEDERATION settings, please check documentation." )
return configuration
|
def _ProcessSources ( self , source_path_specs , storage_writer , filter_find_specs = None ) :
"""Processes the sources .
Args :
source _ path _ specs ( list [ dfvfs . PathSpec ] ) : path specifications of
the sources to process .
storage _ writer ( StorageWriter ) : storage writer for a session storage .
filter _ find _ specs ( Optional [ list [ dfvfs . FindSpec ] ] ) : find specifications
used in path specification extraction . If set , path specifications
that match the find specification will be processed ."""
|
if self . _processing_profiler :
self . _processing_profiler . StartTiming ( 'process_sources' )
self . _status = definitions . STATUS_INDICATOR_COLLECTING
self . _number_of_consumed_event_tags = 0
self . _number_of_consumed_events = 0
self . _number_of_consumed_reports = 0
self . _number_of_consumed_sources = 0
self . _number_of_consumed_warnings = 0
self . _number_of_produced_event_tags = 0
self . _number_of_produced_events = 0
self . _number_of_produced_reports = 0
self . _number_of_produced_sources = 0
self . _number_of_produced_warnings = 0
path_spec_generator = self . _path_spec_extractor . ExtractPathSpecs ( source_path_specs , find_specs = filter_find_specs , recurse_file_system = False , resolver_context = self . _resolver_context )
for path_spec in path_spec_generator :
if self . _abort :
break
# TODO : determine if event sources should be DataStream or FileEntry
# or both .
event_source = event_sources . FileEntryEventSource ( path_spec = path_spec )
storage_writer . AddEventSource ( event_source )
self . _number_of_produced_sources = storage_writer . number_of_event_sources
# Update the foreman process status in case we are using a filter file .
self . _UpdateForemanProcessStatus ( )
if self . _status_update_callback :
self . _status_update_callback ( self . _processing_status )
self . _ScheduleTasks ( storage_writer )
if self . _abort :
self . _status = definitions . STATUS_INDICATOR_ABORTED
else :
self . _status = definitions . STATUS_INDICATOR_COMPLETED
self . _number_of_produced_events = storage_writer . number_of_events
self . _number_of_produced_sources = storage_writer . number_of_event_sources
self . _number_of_produced_warnings = storage_writer . number_of_warnings
if self . _processing_profiler :
self . _processing_profiler . StopTiming ( 'process_sources' )
# Update the foreman process and task status in case we are using
# a filter file .
self . _UpdateForemanProcessStatus ( )
tasks_status = self . _task_manager . GetStatusInformation ( )
if self . _task_queue_profiler :
self . _task_queue_profiler . Sample ( tasks_status )
self . _processing_status . UpdateTasksStatus ( tasks_status )
if self . _status_update_callback :
self . _status_update_callback ( self . _processing_status )
|
def precompute_optimzation_S ( laplacian_matrix , n_samples , relaxation_kwds ) :
"""compute Rk , A , ATAinv , neighbors and pairs for projected mode"""
|
relaxation_kwds . setdefault ( 'presave' , False )
relaxation_kwds . setdefault ( 'presave_name' , 'pre_comp_current.npy' )
relaxation_kwds . setdefault ( 'verbose' , False )
if relaxation_kwds [ 'verbose' ] :
print ( 'Pre-computing quantities Y to S conversions' )
print ( 'Making A and Pairs' )
A , pairs = makeA ( laplacian_matrix )
if relaxation_kwds [ 'verbose' ] :
print ( 'Making Rk and nbhds' )
Rk_tensor , nbk = compute_Rk ( laplacian_matrix , A , n_samples )
# TODO : not quite sure what is ATAinv ? why we need this ?
ATAinv = np . linalg . pinv ( A . T . dot ( A ) . todense ( ) )
if relaxation_kwds [ 'verbose' ] :
print ( 'Finish calculating pseudo inverse' )
if relaxation_kwds [ 'presave' ] :
raise NotImplementedError ( 'Not yet implemented presave' )
return { 'RK' : Rk_tensor , 'nbk' : nbk , 'ATAinv' : ATAinv , 'pairs' : pairs , 'A' : A }
|
def present(name, properties=None, filesystem_properties=None, layout=None, config=None):
    '''
    ensure storage pool is present on the system

    name : string
        name of storage pool
    properties : dict
        optional set of properties to set for the storage pool
    filesystem_properties : dict
        optional set of filesystem properties to set for the storage pool (creation only)
    layout : dict
        disk layout to use if the pool does not exist (creation only)
    config : dict
        fine grain control over this state

    .. note::

        The following configuration properties can be toggled in the config parameter.
          - import (true) - try to import the pool before creating it if absent
          - import_dirs (None) - specify additional locations to scan for devices on import (comma-separated)
          - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for non-absolute device paths
          - force (false) - try to force the import or creation

    .. note::

        It is no longer needed to give a unique name to each top-level vdev, the old
        layout format is still supported but no longer recommended.

        .. code-block:: yaml

            - mirror:
              - /tmp/vdisk3
              - /tmp/vdisk2
            - mirror:
              - /tmp/vdisk0
              - /tmp/vdisk1

        The above yaml will always result in the following zpool create:

        .. code-block:: bash

            zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1

    .. warning::

        The legacy format is also still supported but not recommended,
        because ID's inside the layout dict must be unique they need to have a suffix.

        .. code-block:: yaml

            mirror-0:
              /tmp/vdisk3
              /tmp/vdisk2
            mirror-1:
              /tmp/vdisk0
              /tmp/vdisk1

    .. warning::

        Pay attention to the order of your dict!

        .. code-block:: yaml

            - mirror:
              - /tmp/vdisk0
              - /tmp/vdisk1
              - /tmp/vdisk2

        The above will result in the following zpool create:

        .. code-block:: bash

            zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2

        Creating a 3-way mirror! While you probably expect it to be a mirror
        root vdev with 2 devices + a root vdev of 1 device!
    '''
|
ret = { 'name' : name , 'changes' : { } , 'result' : None , 'comment' : '' }
# config defaults
default_config = { 'import' : True , 'import_dirs' : None , 'device_dir' : None , 'force' : False }
if __grains__ [ 'kernel' ] == 'SunOS' :
default_config [ 'device_dir' ] = '/dev/dsk'
elif __grains__ [ 'kernel' ] == 'Linux' :
default_config [ 'device_dir' ] = '/dev'
# merge state config
if config :
default_config . update ( config )
config = default_config
# ensure properties are zfs values
if properties :
properties = __utils__ [ 'zfs.from_auto_dict' ] ( properties )
elif properties is None :
properties = { }
if filesystem_properties :
filesystem_properties = __utils__ [ 'zfs.from_auto_dict' ] ( filesystem_properties )
elif filesystem_properties is None :
filesystem_properties = { }
# parse layout
vdevs = _layout_to_vdev ( layout , config [ 'device_dir' ] )
if vdevs :
vdevs . insert ( 0 , name )
# log configuration
log . debug ( 'zpool.present::%s::config - %s' , name , config )
log . debug ( 'zpool.present::%s::vdevs - %s' , name , vdevs )
log . debug ( 'zpool.present::%s::properties - %s' , name , properties )
log . debug ( 'zpool.present::%s::filesystem_properties - %s' , name , filesystem_properties )
# ensure the pool is present
ret [ 'result' ] = False
# don ' t do anything because this is a test
if __opts__ [ 'test' ] :
ret [ 'result' ] = True
if __salt__ [ 'zpool.exists' ] ( name ) :
ret [ 'changes' ] [ name ] = 'uptodate'
else :
ret [ 'changes' ] [ name ] = 'imported' if config [ 'import' ] else 'created'
ret [ 'comment' ] = 'storage pool {0} was {1}' . format ( name , ret [ 'changes' ] [ name ] )
# update pool
elif __salt__ [ 'zpool.exists' ] ( name ) :
ret [ 'result' ] = True
# fetch current pool properties
properties_current = __salt__ [ 'zpool.get' ] ( name , parsable = True )
# build list of properties to update
properties_update = [ ]
if properties :
for prop in properties : # skip unexisting properties
if prop not in properties_current :
log . warning ( 'zpool.present::%s::update - unknown property: %s' , name , prop )
continue
# compare current and wanted value
if properties_current [ prop ] != properties [ prop ] :
properties_update . append ( prop )
# update pool properties
for prop in properties_update :
res = __salt__ [ 'zpool.set' ] ( name , prop , properties [ prop ] )
if res [ 'set' ] :
if name not in ret [ 'changes' ] :
ret [ 'changes' ] [ name ] = { }
ret [ 'changes' ] [ name ] [ prop ] = properties [ prop ]
else :
ret [ 'result' ] = False
if ret [ 'comment' ] == '' :
ret [ 'comment' ] = 'The following properties were not updated:'
ret [ 'comment' ] = '{0} {1}' . format ( ret [ 'comment' ] , prop )
if ret [ 'result' ] :
ret [ 'comment' ] = 'properties updated' if ret [ 'changes' ] else 'no update needed'
# import or create the pool ( at least try to anyway )
else : # import pool
if config [ 'import' ] :
mod_res = __salt__ [ 'zpool.import' ] ( name , force = config [ 'force' ] , dir = config [ 'import_dirs' ] , )
ret [ 'result' ] = mod_res [ 'imported' ]
if ret [ 'result' ] :
ret [ 'changes' ] [ name ] = 'imported'
ret [ 'comment' ] = 'storage pool {0} was imported' . format ( name )
# create pool
if not ret [ 'result' ] and vdevs :
log . debug ( 'zpool.present::%s::creating' , name )
# execute zpool . create
mod_res = __salt__ [ 'zpool.create' ] ( * vdevs , force = config [ 'force' ] , properties = properties , filesystem_properties = filesystem_properties )
ret [ 'result' ] = mod_res [ 'created' ]
if ret [ 'result' ] :
ret [ 'changes' ] [ name ] = 'created'
ret [ 'comment' ] = 'storage pool {0} was created' . format ( name )
elif 'error' in mod_res :
ret [ 'comment' ] = mod_res [ 'error' ]
else :
ret [ 'comment' ] = 'could not create storage pool {0}' . format ( name )
# give up , we cannot import the pool and we do not have a layout to create it
if not ret [ 'result' ] and not vdevs :
ret [ 'comment' ] = 'storage pool {0} was not imported, no (valid) layout specified for creation' . format ( name )
return ret
|
def vqa_v2_preprocess_image ( image , height , width , mode , resize_side = 512 , distort = True , image_model_fn = "resnet_v1_152" , ) :
"""vqa v2 preprocess image ."""
|
image = tf . image . convert_image_dtype ( image , dtype = tf . float32 )
assert resize_side > 0
if resize_side :
image = _aspect_preserving_resize ( image , resize_side )
if mode == tf . estimator . ModeKeys . TRAIN :
image = tf . random_crop ( image , [ height , width , 3 ] )
else : # Central crop , assuming resize _ height > height , resize _ width > width .
image = tf . image . resize_image_with_crop_or_pad ( image , height , width )
image = tf . clip_by_value ( image , 0.0 , 1.0 )
if mode == tf . estimator . ModeKeys . TRAIN and distort :
image = _flip ( image )
num_distort_cases = 4
# pylint : disable = unnecessary - lambda
image = _apply_with_random_selector ( image , lambda x , ordering : _distort_color ( x , ordering ) , num_cases = num_distort_cases )
if image_model_fn . startswith ( "resnet_v1" ) : # resnet _ v1 uses vgg preprocessing
image = image * 255.
image = _mean_image_subtraction ( image , [ _R_MEAN , _G_MEAN , _B_MEAN ] )
elif image_model_fn . startswith ( "resnet_v2" ) : # resnet v2 uses inception preprocessing
image = tf . subtract ( image , 0.5 )
image = tf . multiply ( image , 2.0 )
return image
|
def SolveClosestFacility ( self , facilities = None , incidents = None , barriers = None , polylineBarriers = None , polygonBarriers = None , attributeParameterValues = None , returnDirections = None , directionsLanguage = None , directionsStyleName = None , directionsLengthUnits = None , directionsTimeAttributeName = None , returnCFRoutes = None , returnFacilities = None , returnIncidents = None , returnBarriers = None , returnPolylineBarriers = None , returnPolygonBarriers = None , facilityReturnType = None , outputLines = None , defaultCutoff = None , defaultTargetFacilityCount = None , travelDirection = None , outSR = None , impedanceAttributeName = None , restrictionAttributeNames = None , restrictUTurns = None , useHierarchy = None , outputGeometryPrecision = None , outputGeometryPrecisionUnits = None ) :
"""The solve operation is performed on a network layer resource of type
closest facility ."""
|
raise NotImplementedError ( )
|
def set ( self , value , field = None , index = None , check = 1 ) :
"""Set value of this parameter from a string or other value .
Field is optional parameter field ( p _ prompt , p _ minimum , etc . )
Index is optional array index ( zero - based ) . Set check = 0 to
assign the value without checking to see if it is within
the min - max range or in the choice list ."""
|
if index is not None :
sumindex = self . _sumindex ( index )
try :
value = self . _coerceOneValue ( value )
if check :
self . value [ sumindex ] = self . checkOneValue ( value )
else :
self . value [ sumindex ] = value
return
except IndexError : # should never happen
raise SyntaxError ( "Illegal index [" + repr ( sumindex ) + "] for array parameter " + self . name )
if field :
self . _setField ( value , field , check = check )
else :
if check :
self . value = self . checkValue ( value )
else :
self . value = self . _coerceValue ( value )
self . setChanged ( )
|
def convert_broadcast_to ( node , ** kwargs ) :
"""Map MXNet ' s broadcast _ to operator attributes to onnx ' s Expand
operator and return the created node ."""
|
name , input_nodes , attrs = get_inputs ( node , kwargs )
shape_list = convert_string_to_list ( attrs [ "shape" ] )
initializer = kwargs [ "initializer" ]
output_shape_np = np . array ( shape_list , dtype = 'int64' )
data_type = onnx . mapping . NP_TYPE_TO_TENSOR_TYPE [ output_shape_np . dtype ]
dims = np . shape ( output_shape_np )
output_shape_name = "expand_attr_tensor" + str ( kwargs [ "idx" ] )
tensor_node = onnx . helper . make_tensor_value_info ( output_shape_name , data_type , dims )
initializer . append ( onnx . helper . make_tensor ( name = output_shape_name , data_type = data_type , dims = dims , vals = shape_list , raw = False , ) )
input_nodes . append ( output_shape_name )
expand_node = onnx . helper . make_node ( "Expand" , input_nodes , [ name ] , name = name )
return [ tensor_node , expand_node ]
|
def Debugger_setBlackboxPatterns ( self , patterns ) :
"""Function path : Debugger . setBlackboxPatterns
Domain : Debugger
Method name : setBlackboxPatterns
WARNING : This function is marked ' Experimental ' !
Parameters :
Required arguments :
' patterns ' ( type : array ) - > Array of regexps that will be used to check script url for blackbox state .
No return value .
Description : Replace previous blackbox patterns with passed ones . Forces backend to skip stepping / pausing in scripts with url matching one of the patterns . VM will try to leave blackboxed script by performing ' step in ' several times , finally resorting to ' step out ' if unsuccessful ."""
|
assert isinstance ( patterns , ( list , tuple ) ) , "Argument 'patterns' must be of type '['list', 'tuple']'. Received type: '%s'" % type ( patterns )
subdom_funcs = self . synchronous_command ( 'Debugger.setBlackboxPatterns' , patterns = patterns )
return subdom_funcs
|
def unpack2D ( _x ) :
"""Helper function for splitting 2D data into x and y component to make
equations simpler"""
|
_x = np . atleast_2d ( _x )
x = _x [ : , 0 ]
y = _x [ : , 1 ]
return x , y
|
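A quick sketch of the split performed above, using NumPy directly.

import numpy as np

pts = np.array([[1.0, 2.0], [3.0, 4.0]])
x, y = pts[:, 0], pts[:, 1]    # the same split unpack2D performs after atleast_2d
print(x, y)                    # [1. 3.] [2. 4.]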
def annotate(self, fname, tables, feature_strand=False, in_memory=False, header=None, out=sys.stdout, parallel=False):
    """annotate a file with a number of tables

    Parameters
    ----------
    fname : str or file
        file name or file-handle
    tables : list
        list of tables with which to annotate `fname`
    feature_strand : bool
        if this is True, then the up/downstream designations are based on
        the features in `tables` rather than the features in `fname`
    in_memory : bool
        if True, then tables are read into memory. This usually makes the
        annotation much faster if there are more than 500 features in
        `fname` and the number of features in the table is less than 100K.
    header : str
        header to print out (if True, use existing header)
    out : file
        where to print output
    parallel : bool
        if True, use multiprocessing library to execute the annotation of
        each chromosome in parallel. Uses more memory.
    """
|
from . annotate import annotate
return annotate ( self , fname , tables , feature_strand , in_memory , header = header , out = out , parallel = parallel )
|
def init_scheduler ( db_uri ) :
"""Initialise and configure the scheduler ."""
|
global scheduler
scheduler = apscheduler . Scheduler ( )
scheduler . misfire_grace_time = 3600
scheduler . add_jobstore ( sqlalchemy_store . SQLAlchemyJobStore ( url = db_uri ) , 'default' )
scheduler . add_listener ( job_listener , events . EVENT_JOB_EXECUTED | events . EVENT_JOB_MISSED | events . EVENT_JOB_ERROR )
return scheduler
|
def setEditorData ( self , editor , value ) :
"""Sets the value for the given editor to the inputed value .
: param editor | < QWidget >
value | < variant >"""
|
# set the data for a multitagedit
if ( isinstance ( editor , XMultiTagEdit ) ) :
if ( not isinstance ( value , list ) ) :
value = [ nativestring ( value ) ]
else :
value = map ( nativestring , value )
editor . setTags ( value )
editor . setCurrentItem ( editor . createItem ( ) )
# set the data for a combobox
elif ( isinstance ( editor , QComboBox ) ) :
i = editor . findText ( nativestring ( value ) )
editor . setCurrentIndex ( i )
editor . lineEdit ( ) . selectAll ( )
# set the data for a line edit
elif ( isinstance ( editor , QLineEdit ) ) :
editor . setText ( nativestring ( value ) )
editor . selectAll ( )
|
def image ( request , obj_id ) :
"""Handles a request based on method and calls the appropriate function"""
|
obj = Image . objects . get ( pk = obj_id )
if request . method == 'POST' :
return post ( request , obj )
elif request . method == 'PUT' :
getPutData ( request )
return put ( request , obj )
elif request . method == 'DELETE' :
getPutData ( request )
return delete ( request , obj )
|
def claim_messages ( self , queue , ttl , grace , count = None ) :
"""Claims up to ` count ` unclaimed messages from the specified queue . If
count is not specified , the default is to claim 10 messages .
The ` ttl ` parameter specifies how long the server should wait before
releasing the claim . The ttl value MUST be between 60 and 43200 seconds .
The ` grace ` parameter is the message grace period in seconds . The value
of grace MUST be between 60 and 43200 seconds . The server extends the
lifetime of claimed messages to be at least as long as the lifetime of
the claim itself , plus a specified grace period to deal with crashed
workers ( up to 1209600 or 14 days including claim lifetime ) . If a
claimed message would normally live longer than the grace period , its
expiration will not be adjusted .
Returns a QueueClaim object , whose ' messages ' attribute contains the
list of QueueMessage objects representing the claimed messages ."""
|
return queue . claim_messages ( ttl , grace , count = count )
|
def StopTaskStorage ( self , abort = False ) :
"""Removes the temporary path for the task storage .
The results of tasks will be lost on abort .
Args :
abort ( bool ) : True to indicate the stop is issued on abort .
Raises :
IOError : if the storage type is not supported .
OSError : if the storage type is not supported ."""
|
if self . _storage_type != definitions . STORAGE_TYPE_SESSION :
raise IOError ( 'Unsupported storage type.' )
if os . path . isdir ( self . _merge_task_storage_path ) :
if abort :
shutil . rmtree ( self . _merge_task_storage_path )
else :
os . rmdir ( self . _merge_task_storage_path )
if os . path . isdir ( self . _processed_task_storage_path ) :
if abort :
shutil . rmtree ( self . _processed_task_storage_path )
else :
os . rmdir ( self . _processed_task_storage_path )
if os . path . isdir ( self . _task_storage_path ) :
if abort :
shutil . rmtree ( self . _task_storage_path )
else :
os . rmdir ( self . _task_storage_path )
self . _merge_task_storage_path = None
self . _processed_task_storage_path = None
self . _task_storage_path = None
|
def prepopulate(self, queryset):
    """Prepopulate a descendants query's children efficiently.

    Call like: blah.prepopulate(blah.get_descendants().select_related(stuff))
    """
|
objs = list ( queryset )
hashobjs = dict ( [ ( x . pk , x ) for x in objs ] + [ ( self . pk , self ) ] )
for descendant in hashobjs . values ( ) :
descendant . _cached_children = [ ]
for descendant in objs :
assert descendant . _closure_parent_pk in hashobjs
parent = hashobjs [ descendant . _closure_parent_pk ]
parent . _cached_children . append ( descendant )
|
def get_path_components ( path ) :
"""Extract the module name and class name out of the fully qualified path to the class .
: param str path : The full path to the class .
: return : The module path and the class name .
: rtype : str , str
: raise : ` ` VerifyingDoubleImportError ` ` if the path is to a top - level module ."""
|
path_segments = path . split ( '.' )
module_path = '.' . join ( path_segments [ : - 1 ] )
if module_path == '' :
raise VerifyingDoubleImportError ( 'Invalid import path: {}.' . format ( path ) )
class_name = path_segments [ - 1 ]
return module_path , class_name
|
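A doctest-style illustration of the split, assuming `get_path_components` is in scope; a dot-free path raises `VerifyingDoubleImportError` as described in the docstring.

>>> get_path_components('package.module.ClassName')
('package.module', 'ClassName')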
def leaf_sections ( h ) :
"""Returns a list of all sections that have no children ."""
|
leaves = [ ]
for section in h . allsec ( ) :
sref = h . SectionRef ( sec = section )
# nchild returns a float . . . cast to bool
if sref . nchild ( ) < 0.9 :
leaves . append ( section )
return leaves
|
def translate_path ( self , path ) :
"""Translate a / - separated PATH to the local filename syntax .
Components that mean special things to the local file system
( e . g . drive or directory names ) are ignored . ( XXX They should
probably be diagnosed . )"""
|
# abandon query parameters
path = path . split ( '?' , 1 ) [ 0 ]
path = path . split ( '#' , 1 ) [ 0 ]
# Don ' t forget explicit trailing slash when normalizing . Issue17324
trailing_slash = path . rstrip ( ) . endswith ( '/' )
path = posixpath . normpath ( urllib . parse . unquote ( path ) )
words = path . split ( '/' )
words = [ _f for _f in words if _f ]
path = self . base_path
for word in words :
_ , word = os . path . splitdrive ( word )
_ , word = os . path . split ( word )
if word in ( os . curdir , os . pardir ) :
continue
path = os . path . join ( path , word )
if trailing_slash :
path += '/'
return path
|
def get_all_attribute_value ( self , tag_name , attribute , format_value = True , ** attribute_filter ) :
"""Yields all the attribute values in xml files which match with the tag name and the specific attribute
: param str tag _ name : specify the tag name
: param str attribute : specify the attribute
: param bool format _ value : specify if the value needs to be formatted with packagename"""
|
tags = self . find_tags ( tag_name , ** attribute_filter )
for tag in tags :
value = tag . get ( attribute ) or tag . get ( self . _ns ( attribute ) )
if value is not None :
if format_value :
yield self . _format_value ( value )
else :
yield value
|
def keyboard ( table , day = None ) :
"""Handler for showing the keyboard statistics page ."""
|
cols , group = "realkey AS key, COUNT(*) AS count" , "realkey"
where = ( ( "day" , day ) , ) if day else ( )
counts_display = counts = db . fetch ( table , cols , where , group , "count DESC" )
if "combos" == table :
counts_display = db . fetch ( table , "key, COUNT(*) AS count" , where , "key" , "count DESC" )
events = db . fetch ( table , where = where , order = "stamp" )
for e in events :
e [ "dt" ] = datetime . datetime . fromtimestamp ( e [ "stamp" ] )
stats , collatedevents = stats_keyboard ( events , table )
days , input = db . fetch ( "counts" , order = "day" , type = table ) , "keyboard"
return bottle . template ( "heatmap.tpl" , locals ( ) , conf = conf )
|
def randompaths ( request , num_paths = 1 , size = 250 , mu = 0 , sigma = 1 ) :
'''Lists of random walks .'''
|
r = [ ]
for p in range ( num_paths ) :
v = 0
path = [ v ]
r . append ( path )
for t in range ( size ) :
v += normalvariate ( mu , sigma )
path . append ( v )
return r
|
def deserialize_decimal ( attr ) :
"""Deserialize string into Decimal object .
: param str attr : response string to be deserialized .
: rtype : Decimal
: raises : DeserializationError if string format invalid ."""
|
if isinstance ( attr , ET . Element ) :
attr = attr . text
try :
return decimal . Decimal ( attr )
except decimal . DecimalException as err :
msg = "Invalid decimal {}" . format ( attr )
raise_with_traceback ( DeserializationError , msg , err )
|
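A short sketch of the conversion at the core of the helper above, using only the standard-library `decimal` module.

import decimal

# The helper ultimately boils down to this conversion:
print(decimal.Decimal("3.14"))     # 3.14
# Invalid input such as "abc" raises decimal.DecimalException,
# which the helper rewraps as a DeserializationError.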
def snapshot_registry ( self ) :
'''Give the dictionary of recorders detached from the existing instances .
It is safe to store those references for future use . Used by feattool .'''
|
unserializer = banana . Unserializer ( externalizer = self )
serializer = banana . Serializer ( externalizer = self )
return unserializer . convert ( serializer . convert ( self . registry ) )
|
def _validate_claim_request ( claims , ignore_errors = False ) :
"""Validates a claim request section ( ` userinfo ` or ` id _ token ` ) according
to section 5.5 of the OpenID Connect specification :
- http : / / openid . net / specs / openid - connect - core - 1_0 . html # ClaimsParameter
Returns a copy of the claim request with only the valid fields and values .
Raises ValueError if the claim request is invalid and ` ignore _ errors ` is False"""
|
results = { }
claims = claims if claims else { }
for name , value in claims . iteritems ( ) :
if value is None :
results [ name ] = None
elif isinstance ( value , dict ) :
results [ name ] = _validate_claim_values ( name , value , ignore_errors )
else :
if not ignore_errors :
msg = 'Invalid claim {}.' . format ( name )
raise ValueError ( msg )
return results
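For illustration, the shape of a claim request this validator accepts; the values below are examples, not taken from any real request, and _validate_claim_values is assumed to be defined elsewhere in the same module:
claims = {
    "email": None,                      # claim requested with default behaviour
    "nickname": {"essential": True},    # claim with a value object, validated recursively
}
# _validate_claim_request(claims) would return a copy keeping both entries, while
# {"email": "not-a-dict"} would raise ValueError unless ignore_errors=True.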
|
def repr_text ( text , indent ) :
"""Return a debug representation of a multi - line text ( e . g . the result
of another repr . . . ( ) function ) ."""
|
if text is None :
return 'None'
ret = _indent ( text , amount = indent )
return ret . lstrip ( ' ' )
|
def _vertex_different_colors_qubo ( G , x_vars ) :
"""For each vertex , it should not have the same color as any of its
neighbors . Generates the QUBO to enforce this constraint .
Notes
Does not enforce each node having a single color .
Ground energy is 0 , infeasible gap is 1."""
|
Q = { }
for u , v in G . edges :
if u not in x_vars or v not in x_vars :
continue
for color in x_vars [ u ] :
if color in x_vars [ v ] :
Q [ ( x_vars [ u ] [ color ] , x_vars [ v ] [ color ] ) ] = 1.
return Q
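A worked example of the QUBO it builds, assuming x_vars maps each node to a dict from color to a binary-variable label (the graph and labels are made up for illustration):
import networkx as nx

G = nx.Graph([("a", "b")])
x_vars = {
    "a": {0: ("a", 0), 1: ("a", 1)},
    "b": {0: ("b", 0), 1: ("b", 1)},
}
# the single edge (a, b) shares two colors, so two quadratic penalties of 1 appear
print(_vertex_different_colors_qubo(G, x_vars))
# {(('a', 0), ('b', 0)): 1.0, (('a', 1), ('b', 1)): 1.0}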
|
def checkout_default_branch ( self ) :
"""git checkout default branch"""
|
set_state ( WORKFLOW_STATES . CHECKING_OUT_DEFAULT_BRANCH )
cmd = "git" , "checkout" , self . config [ "default_branch" ]
self . run_cmd ( cmd )
set_state ( WORKFLOW_STATES . CHECKED_OUT_DEFAULT_BRANCH )
|
def get_ordered_entries ( self , queryset = False ) :
"""Custom ordering . First we get the average views and rating for
the categories ' s entries . Second we created a rank by multiplying
both . Last , we sort categories by this rank from top to bottom .
Example :
- Cat _ 1
- Entry _ 1 ( 500 Views , Rating 2)
- Entry _ 2 ( 200 Views , Rating - 4)
- Entry _ 3 ( 100 Views , Rating 3)
- Cat _ 2
- Entry _ 1 ( 200 Views , Rating 7)
- Entry _ 2 ( 50 Views , Rating 2)
Result :
Cat _ 1 has a rank of 88.88 ( avg . views : 266.66 , avg . rating : 0.33)
Cat _ 2 has a rank of 562.5 ( avg . views : 125 , avg . rating : 4.5)
Cat _ 2 will be displayed at the top . The algorithm is quality - oriented ,
as you can see ."""
|
if queryset :
self . queryset = queryset
else :
self . queryset = EntryCategory . objects . all ( )
if self . queryset :
for category in self . queryset :
entries = category . get_entries ( )
if entries :
amount_list = [ e . amount_of_views for e in entries ]
rating_list = [ e . rating ( ) for e in entries ]
views_per_entry = fsum ( amount_list ) / len ( amount_list )
rating_per_entry = fsum ( rating_list ) / len ( rating_list )
category . last_rank = views_per_entry * rating_per_entry
category . save ( )
else :
self . queryset = self . queryset . exclude ( pk = category . pk )
self . queryset = sorted ( self . queryset , key = lambda c : c . last_rank , reverse = True )
return self . queryset
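A standalone sketch of just the ranking arithmetic from the docstring example (plain numbers, no ORM):
from math import fsum

def rank(view_rating_pairs):
    views = [v for v, _ in view_rating_pairs]
    ratings = [r for _, r in view_rating_pairs]
    return (fsum(views) / len(views)) * (fsum(ratings) / len(ratings))

cat_1 = rank([(500, 2), (200, -4), (100, 3)])   # ~88.89
cat_2 = rank([(200, 7), (50, 2)])               # 562.5
print(sorted([("Cat_1", cat_1), ("Cat_2", cat_2)], key=lambda c: c[1], reverse=True))
# Cat_2 ranks first, matching the docstring example.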
|
def get_referenced_object ( self ) :
""": rtype : core . BunqModel
: raise : BunqException"""
|
if self . _Payment is not None :
return self . _Payment
if self . _PaymentBatch is not None :
return self . _PaymentBatch
raise exception . BunqException ( self . _ERROR_NULL_FIELDS )
|
def transform ( im , mean , std ) :
"""transform into mxnet tensor ,
subtract pixel size and transform to correct format
: param im : [ height , width , channel ] in BGR
: param mean : [ RGB pixel mean ]
: param std : [ RGB pixel std var ]
: return : [ batch , channel , height , width ]"""
|
im_tensor = np . zeros ( ( 3 , im . shape [ 0 ] , im . shape [ 1 ] ) )
for i in range ( 3 ) :
im_tensor [ i , : , : ] = ( im [ : , : , 2 - i ] - mean [ i ] ) / std [ i ]
return im_tensor
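A quick shape check with a dummy BGR image; the mean values below are common RGB pixel means used only for illustration:
import numpy as np

im = np.random.randint(0, 256, size=(4, 6, 3)).astype(np.float32)   # BGR, height=4, width=6
mean, std = [123.68, 116.78, 103.94], [1.0, 1.0, 1.0]                # RGB order
out = transform(im, mean, std)
print(out.shape)   # (3, 4, 6): channel-first, RGB order, no batch axis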
|
def update ( self , infos ) :
"""Process received infos ."""
|
for info in infos :
if isinstance ( info , LearningGene ) :
self . replicate ( info )
|
def tree ( bary , n , standardization , symbolic = False ) :
"""Evaluates the entire tree of orthogonal triangle polynomials .
The return value is a list of arrays , where ` out [ k ] ` hosts the ` k + 1 `
values of the ` k ` th level of the tree
(0 , 0)
(0 , 1 ) ( 1 , 1)
(0 , 2 ) ( 1 , 2 ) ( 2 , 2)
For reference , see
Abedallah Rababah ,
Recurrence Relations for Orthogonal Polynomials on Triangular Domains ,
Mathematics 2016 , 4(2 ) , 25,
< https : / / doi . org / 10.3390 / math4020025 > ."""
|
S = numpy . vectorize ( sympy . S ) if symbolic else lambda x : x
sqrt = numpy . vectorize ( sympy . sqrt ) if symbolic else numpy . sqrt
if standardization == "1" :
p0 = 1
def alpha ( n ) :
r = numpy . arange ( n )
return S ( n * ( 2 * n + 1 ) ) / ( ( n - r ) * ( n + r + 1 ) )
def beta ( n ) :
r = numpy . arange ( n )
return S ( n * ( 2 * r + 1 ) ** 2 ) / ( ( n - r ) * ( n + r + 1 ) * ( 2 * n - 1 ) )
def gamma ( n ) :
r = numpy . arange ( n - 1 )
return S ( ( n - r - 1 ) * ( n + r ) * ( 2 * n + 1 ) ) / ( ( n - r ) * ( n + r + 1 ) * ( 2 * n - 1 ) )
def delta ( n ) :
return S ( 2 * n - 1 ) / n
def epsilon ( n ) :
return S ( n - 1 ) / n
else : # The coefficients here are based on the insight that
# int _ T P _ { n , r } ^ 2 =
# int _ 0 ^ 1 L _ r ^ 2 ( t ) dt * int _ 0 ^ 1 q _ { n , r } ( w ) ^ 2 ( 1 - w ) ^ ( r + s + 1 ) dw .
# For reference , see
# page 219 ( and the reference to Gould , 1972 ) in
# Farouki , Goodman , Sauer ,
# Construction of orthogonal bases for polynomials in Bernstein form
# on triangular and simplex domains ,
# Computer Aided Geometric Design 20 ( 2003 ) 209–230.
# The Legendre integral is 1 / ( 2 * r + 1 ) , and one gets
# int _ T P _ { n , r } ^ 2 = 1 / ( 2 * r + 1 ) / ( 2 * n + 2)
# sum _ { i = 0 } ^ { n - r } sum _ { j = 0 } ^ { n - r }
# ( - 1 ) * * ( i + j ) * binom ( n + r + 1 , i ) * binom ( n - r , i )
# * binom ( n + r + 1 , j ) * binom ( n - r , j )
# / binom ( 2 * n + 1 , i + j )
# Astonishingly , the double sum is always 1 , hence
# int _ T P _ { n , r } ^ 2 = 1 / ( 2 * r + 1 ) / ( 2 * n + 2 ) .
assert standardization == "normal"
p0 = sqrt ( 2 )
def alpha ( n ) :
r = numpy . arange ( n )
return sqrt ( ( n + 1 ) * n ) * ( S ( 2 * n + 1 ) / ( ( n - r ) * ( n + r + 1 ) ) )
def beta ( n ) :
r = numpy . arange ( n )
return ( sqrt ( ( n + 1 ) * n ) * S ( ( 2 * r + 1 ) ** 2 ) / ( ( n - r ) * ( n + r + 1 ) * ( 2 * n - 1 ) ) )
def gamma ( n ) :
r = numpy . arange ( n - 1 )
return sqrt ( S ( n + 1 ) / ( n - 1 ) ) * ( S ( ( n - r - 1 ) * ( n + r ) * ( 2 * n + 1 ) ) / ( ( n - r ) * ( n + r + 1 ) * ( 2 * n - 1 ) ) )
def delta ( n ) :
return sqrt ( S ( ( 2 * n + 1 ) * ( n + 1 ) * ( 2 * n - 1 ) ) / n ** 3 )
def epsilon ( n ) :
return sqrt ( S ( ( 2 * n + 1 ) * ( n + 1 ) * ( n - 1 ) ) / ( ( 2 * n - 3 ) * n ** 2 ) )
u , v , w = bary
out = [ numpy . array ( [ numpy . zeros_like ( u ) + p0 ] ) ]
for L in range ( 1 , n + 1 ) :
out . append ( numpy . concatenate ( [ out [ L - 1 ] * ( numpy . multiply . outer ( alpha ( L ) , 1 - 2 * w ) . T - beta ( L ) ) . T , [ delta ( L ) * out [ L - 1 ] [ L - 1 ] * ( u - v ) ] , ] ) )
if L > 1 :
out [ - 1 ] [ : L - 1 ] -= ( out [ L - 2 ] . T * gamma ( L ) ) . T
out [ - 1 ] [ - 1 ] -= epsilon ( L ) * out [ L - 2 ] [ L - 2 ] * ( u + v ) ** 2
return out
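A short numeric sketch of evaluating the tree at a few barycentric points with standardization "1" and symbolic left at False (the shapes follow from the code above):
import numpy

# one barycentric point per column; each column sums to 1
bary = numpy.array([
    [1.0, 0.0, 1 / 3],
    [0.0, 1.0, 1 / 3],
    [0.0, 0.0, 1 / 3],
])
out = tree(bary, 2, "1")
print(len(out))        # 3 levels: k = 0, 1, 2
print(out[0].shape)    # (1, 3): one polynomial at level 0, three evaluation points
print(out[2].shape)    # (3, 3): k + 1 = 3 polynomials at level 2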
|
def start_site ( name ) :
'''Start a Web Site in IIS .
. . versionadded : : 2017.7.0
Args :
name ( str ) : The name of the website to start .
Returns :
bool : True if successful , otherwise False
CLI Example :
. . code - block : : bash
salt ' * ' win _ iis . start _ site name = ' My Test Site ' '''
|
ps_cmd = [ 'Start-WebSite' , r"'{0}'" . format ( name ) ]
cmd_ret = _srvmgr ( ps_cmd )
return cmd_ret [ 'retcode' ] == 0
|
def url_report ( self , scan_url , apikey ) :
"""Send URLS for list of past malicous associations"""
|
url = self . base_url + "url/report"
params = { "apikey" : apikey , 'resource' : scan_url }
rate_limit_clear = self . rate_limit ( )
if rate_limit_clear :
response = requests . post ( url , params = params , headers = self . headers )
if response . status_code == self . HTTP_OK :
json_response = response . json ( )
return json_response
elif response . status_code == self . HTTP_RATE_EXCEEDED :
time . sleep ( 20 )
else :
self . logger . error ( "sent: %s, HTTP: %d" , scan_url , response . status_code )
time . sleep ( self . public_api_sleep_time )
|
def acit ( rest ) :
"Look up an acronym"
|
word = rest . strip ( )
res = util . lookup_acronym ( word )
if res is None :
return "Arg! I couldn't expand that..."
else :
return ' | ' . join ( res )
|
def permissions_for ( self , user ) :
"""Handles permission resolution for a : class : ` User ` .
This function is there for compatibility with other channel types .
Actual direct messages do not really have the concept of permissions .
This returns all the Text related permissions set to true except :
- send _ tts _ messages : You cannot send TTS messages in a DM .
- manage _ messages : You cannot delete others messages in a DM .
This also checks the kick _ members permission if the user is the owner .
Parameters
user : : class : ` User `
The user to check permissions for .
Returns
: class : ` Permissions `
The resolved permissions for the user ."""
|
base = Permissions . text ( )
base . send_tts_messages = False
base . manage_messages = False
base . mention_everyone = True
if user . id == self . owner . id :
base . kick_members = True
return base
|
def user_avatar_update ( self , userid , payload ) :
'''Update the avatar for the given user id .'''
|
response , status_code = self . __pod__ . User . post_v1_admin_user_uid_avatar_update ( sessionToken = self . __session , uid = userid , payload = payload ) . result ( )
self . logger . debug ( '%s: %s' % ( status_code , response ) )
return status_code , response
|
def get_steam_id ( vanityurl , ** kwargs ) :
"""Get a players steam id from their steam name / vanity url"""
|
params = { "vanityurl" : vanityurl }
return make_request ( "ResolveVanityURL" , params , version = "v0001" , base = "http://api.steampowered.com/ISteamUser/" , ** kwargs )
|
def gps_offset ( lat , lon , east , north ) :
'''return new lat / lon after moving east / north
by the given number of meters'''
|
bearing = math . degrees ( math . atan2 ( east , north ) )
distance = math . sqrt ( east ** 2 + north ** 2 )
return gps_newpos ( lat , lon , bearing , distance )
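For intuition, the bearing / distance conversion on its own (gps_newpos is defined elsewhere, so it is not called here):
import math

east, north = 100.0, 100.0
bearing = math.degrees(math.atan2(east, north))   # 45.0 degrees, clockwise from north
distance = math.sqrt(east ** 2 + north ** 2)      # ~141.42 meters
print(bearing, distance)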
|
def Upload ( cls , filename ) :
"""文件上传 , 非原生input
@ todo : some upload . exe not prepared
@ param file : 文件名 ( 文件必须存在在工程resource目录下 ) , upload . exe工具放在工程tools目录下"""
|
raise Exception ( "to do" )
TOOLS_PATH = ""
RESOURCE_PATH = ""
tool_4path = os . path . join ( TOOLS_PATH , "upload.exe" )
file_4path = os . path . join ( RESOURCE_PATH , filename )
# file _ 4path . decode ( ' utf - 8 ' ) . encode ( ' gbk ' )
if os . path . isfile ( file_4path ) :
cls . Click ( )
os . system ( tool_4path + ' ' + file_4path )
else :
raise Exception ( '%s is not exists' % file_4path )
|
def isExe ( self ) :
"""Determines if the current L { PE } instance is an Executable file .
@ rtype : bool
@ return : C { True } if the current L { PE } instance is an Executable file . Otherwise , returns C { False } ."""
|
if not self . isDll ( ) and not self . isDriver ( ) and ( consts . IMAGE_FILE_EXECUTABLE_IMAGE & self . ntHeaders . fileHeader . characteristics . value ) == consts . IMAGE_FILE_EXECUTABLE_IMAGE :
return True
return False
|
def cells_from_defaults ( clz , jsonobj ) :
"""Creates a referent instance of type ` json . kind ` and
initializes it to default values ."""
|
# convert strings to dicts
if isinstance ( jsonobj , ( str , unicode ) ) :
jsonobj = json . loads ( jsonobj )
assert 'cells' in jsonobj , "No cells in object"
domain = TaxonomyCell . get_domain ( )
cells = [ ]
for num , cell_dna in enumerate ( jsonobj [ 'cells' ] ) :
assert 'kind' in cell_dna , "No type definition"
classgenerator = domain . node [ cell_dna [ 'kind' ] ] [ 'class' ]
cell = classgenerator ( )
cell [ 'num' ] . merge ( num )
for attr , val in cell_dna . items ( ) :
if not attr in [ 'kind' ] :
cell [ attr ] . merge ( val )
cells . append ( cell )
return cells
|
def resume ( self ) :
"""Resumes the response stream ."""
|
with self . _wake :
self . _paused = False
self . _wake . notifyAll ( )
|
def competitions_data_list_files ( self , id , ** kwargs ) : # noqa : E501
"""List competition data files # noqa : E501
This method makes a synchronous HTTP request by default . To make an
asynchronous HTTP request , please pass async _ req = True
> > > thread = api . competitions _ data _ list _ files ( id , async _ req = True )
> > > result = thread . get ( )
: param async _ req bool
: param str id : Competition name ( required )
: return : Result
If the method is called asynchronously ,
returns the request thread ."""
|
kwargs [ '_return_http_data_only' ] = True
if kwargs . get ( 'async_req' ) :
return self . competitions_data_list_files_with_http_info ( id , ** kwargs )
# noqa : E501
else :
( data ) = self . competitions_data_list_files_with_http_info ( id , ** kwargs )
# noqa : E501
return data
|
def get_success_url ( self ) :
"""Returns the success URL .
This is either the given ` next ` URL parameter or the content object ' s
` get _ absolute _ url ` method ' s return value ."""
|
if self . next :
return self . next
if self . object and self . object . content_object :
return self . object . content_object . get_absolute_url ( )
raise Exception ( 'No content object given. Please provide ``next`` in your POST' ' data' )
|
def locate_range ( self , chrom , start = None , stop = None ) :
"""Locate slice of index containing all entries within the range
` chrom ` : ` start ` - ` stop ` * * inclusive * * .
Parameters
chrom : object
Chromosome or contig .
start : int , optional
Position start value .
stop : int , optional
Position stop value .
Returns
loc : slice
Slice object .
Examples
> > > import allel
> > > chrom = [ ' chr2 ' , ' chr2 ' , ' chr1 ' , ' chr1 ' , ' chr1 ' , ' chr3 ' ]
> > > pos = [ 1 , 4 , 2 , 5 , 5 , 3]
> > > idx = allel . ChromPosIndex ( chrom , pos )
> > > idx . locate _ range ( ' chr1 ' )
slice ( 2 , 5 , None )
> > > idx . locate _ range ( ' chr2 ' , 1 , 4)
slice ( 0 , 2 , None )
> > > idx . locate _ range ( ' chr1 ' , 3 , 7)
slice ( 3 , 5 , None )
> > > try :
. . . idx . locate _ range ( ' chr3 ' , 4 , 9)
. . . except KeyError as e :
. . . print ( e )
( ' chr3 ' , 4 , 9)"""
|
slice_chrom = self . locate_key ( chrom )
if start is None and stop is None :
return slice_chrom
else :
pos_chrom = SortedIndex ( self . pos [ slice_chrom ] )
try :
slice_within_chrom = pos_chrom . locate_range ( start , stop )
except KeyError :
raise KeyError ( chrom , start , stop )
loc = slice ( slice_chrom . start + slice_within_chrom . start , slice_chrom . start + slice_within_chrom . stop )
return loc
|
def path_options ( line = False , radius = False , ** kwargs ) :
"""Contains options and constants shared between vector overlays
( Polygon , Polyline , Circle , CircleMarker , and Rectangle ) .
Parameters
stroke : Bool , True
Whether to draw stroke along the path .
Set it to false to disable borders on polygons or circles .
color : str , ' # 3388ff '
Stroke color .
weight : int , 3
Stroke width in pixels .
opacity : float , 1.0
Stroke opacity .
line _ cap : str , ' round ' ( lineCap )
A string that defines shape to be used at the end of the stroke .
https : / / developer . mozilla . org / en - US / docs / Web / SVG / Attribute / stroke - linecap
line _ join : str , ' round ' ( lineJoin )
A string that defines shape to be used at the corners of the stroke .
https : / / developer . mozilla . org / en - US / docs / Web / SVG / Attribute / stroke - linejoin
dash _ array : str , None ( dashArray )
A string that defines the stroke dash pattern .
Doesn ' t work on Canvas - powered layers in some old browsers .
https : / / developer . mozilla . org / en - US / docs / Web / SVG / Attribute / stroke - dasharray
dash _ offset : str , None ( dashOffset )
A string that defines the distance into the dash pattern to start the dash .
Doesn ' t work on Canvas - powered layers in some old browsers .
https : / / developer . mozilla . org / en - US / docs / Web / SVG / Attribute / stroke - dashoffset
fill : Bool , False
Whether to fill the path with color .
Set it to false to disable filling on polygons or circles .
fill _ color : str , default to ` color ` ( fillColor )
Fill color . Defaults to the value of the color option .
fill _ opacity : float , 0.2 ( fillOpacity )
Fill opacity .
fill _ rule : str , ' evenodd ' ( fillRule )
A string that defines how the inside of a shape is determined .
https : / / developer . mozilla . org / en - US / docs / Web / SVG / Attribute / fill - rule
bubbling _ mouse _ events : Bool , True ( bubblingMouseEvents )
When true a mouse event on this path will trigger the same event on the
map ( unless L . DomEvent . stopPropagation is used ) .
Note that the presence of ` fill _ color ` will override ` fill = False ` .
See https : / / leafletjs . com / reference - 1.4.0 . html # path"""
|
extra_options = { }
if line :
extra_options = { 'smoothFactor' : kwargs . pop ( 'smooth_factor' , 1.0 ) , 'noClip' : kwargs . pop ( 'no_clip' , False ) , }
if radius :
extra_options . update ( { 'radius' : radius } )
color = kwargs . pop ( 'color' , '#3388ff' )
fill_color = kwargs . pop ( 'fill_color' , False )
if fill_color :
fill = True
elif not fill_color :
fill_color = color
fill = kwargs . pop ( 'fill' , False )
default = { 'stroke' : kwargs . pop ( 'stroke' , True ) , 'color' : color , 'weight' : kwargs . pop ( 'weight' , 3 ) , 'opacity' : kwargs . pop ( 'opacity' , 1.0 ) , 'lineCap' : kwargs . pop ( 'line_cap' , 'round' ) , 'lineJoin' : kwargs . pop ( 'line_join' , 'round' ) , 'dashArray' : kwargs . pop ( 'dash_array' , None ) , 'dashOffset' : kwargs . pop ( 'dash_offset' , None ) , 'fill' : fill , 'fillColor' : fill_color , 'fillOpacity' : kwargs . pop ( 'fill_opacity' , 0.2 ) , 'fillRule' : kwargs . pop ( 'fill_rule' , 'evenodd' ) , 'bubblingMouseEvents' : kwargs . pop ( 'bubbling_mouse_events' , True ) , }
default . update ( extra_options )
return default
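An example call showing how the snake_case keyword arguments map onto Leaflet's camelCase option names and how a fill_color implies fill=True:
opts = path_options(line=True, color="#ff0000", fill_color="#00ff00", dash_array="5, 5")
print(opts["color"], opts["fillColor"], opts["fill"])             # #ff0000 #00ff00 True
print(opts["dashArray"], opts["smoothFactor"], opts["noClip"])    # 5, 5  1.0  False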
|
def __PrintAdditionalImports ( self , imports ) :
"""Print additional imports needed for protorpc ."""
|
google_imports = [ x for x in imports if 'google' in x ]
other_imports = [ x for x in imports if 'google' not in x ]
if other_imports :
for import_ in sorted ( other_imports ) :
self . __printer ( import_ )
self . __printer ( )
# Note : If we ever were going to add imports from this package , we ' d
# need to sort those out and put them at the end .
if google_imports :
for import_ in sorted ( google_imports ) :
self . __printer ( import_ )
self . __printer ( )
|
def get_agents_by_ids ( self , agent_ids ) :
"""Gets an ` ` AgentList ` ` corresponding to the given ` ` IdList ` ` .
In plenary mode , the returned list contains all of the agents
specified in the ` ` Id ` ` list , in the order of the list ,
including duplicates , or an error results if an ` ` Id ` ` in the
supplied list is not found or inaccessible . Otherwise ,
inaccessible ` ` Agents ` ` may be omitted from the list and may
present the elements in any order including returning a unique
set .
arg : agent _ ids ( osid . id . IdList ) : a list of agent ` ` Ids ` `
return : ( osid . authentication . AgentList ) - the returned ` ` Agent
list ` `
raise : NotFound - an ` ` Id ` ` was not found
raise : NullArgument - ` ` agent _ ids ` ` is ` ` null ` `
raise : OperationFailed - unable to complete request
raise : PermissionDenied - authorization failure
* compliance : mandatory - - This method must be implemented . *"""
|
# Implemented from template for
# osid . resource . ResourceLookupSession . get _ resources _ by _ ids
# NOTE : This implementation currently ignores plenary view
collection = JSONClientValidated ( 'authentication' , collection = 'Agent' , runtime = self . _runtime )
object_id_list = [ ]
for i in agent_ids :
object_id_list . append ( ObjectId ( self . _get_id ( i , 'authentication' ) . get_identifier ( ) ) )
result = collection . find ( dict ( { '_id' : { '$in' : object_id_list } } , ** self . _view_filter ( ) ) )
result = list ( result )
sorted_result = [ ]
for object_id in object_id_list :
for object_map in result :
if object_map [ '_id' ] == object_id :
sorted_result . append ( object_map )
break
return objects . AgentList ( sorted_result , runtime = self . _runtime , proxy = self . _proxy )
|
def _apply_loffset ( self , result ) :
"""If loffset is set , offset the result index .
This is NOT an idempotent routine , it will be applied
exactly once to the result .
Parameters
result : Series or DataFrame
the result of resample"""
|
needs_offset = ( isinstance ( self . loffset , ( DateOffset , timedelta , np . timedelta64 ) ) and isinstance ( result . index , DatetimeIndex ) and len ( result . index ) > 0 )
if needs_offset :
result . index = result . index + self . loffset
self . loffset = None
return result
|
def _read_isotopedatabase ( self , ffname = 'isotopedatabase.txt' ) :
'''This private method reads the isotopedatabase . txt file in the sldir
run directory and returns z , a , elements , the cutoff mass for each
species that delineates beta + and beta - decay , and the logical in
the last column . It also provides a charge _ from _ element dictionary
according to isotopedatabase . txt .'''
|
name = self . sldir + ffname
z_db , a_db , el_db , stable_a_db , logic_db = np . loadtxt ( name , unpack = True , dtype = 'str' )
z_db = np . array ( z_db , dtype = 'int' )
a_db = np . array ( a_db , dtype = 'int' )
stable_a_db = np . array ( stable_a_db , dtype = 'int' )
# charge number for element name from dictionary in isotopedatabase . txt
charge_from_element_name = { }
for name in self . stable_names :
if name == 'Neutron' or name == 'Neut' or name == 'NEUT' or name == 'N-1' :
name = 'nn'
try :
zz = z_db [ np . where ( el_db == name ) ] [ 0 ]
charge_from_element_name [ name ] = zz
except IndexError :
print ( name + " does not exist in this run" )
return z_db , a_db , el_db , stable_a_db , logic_db , charge_from_element_name
|
def compare_ordereddict ( self , X , Y ) :
"""Compares two instances of an OrderedDict ."""
|
# check if OrderedDict instances have the same keys and values
child = self . compare_dicts ( X , Y )
if isinstance ( child , DeepExplanation ) :
return child
# check if the order of the keys is the same
for i , j in zip ( X . items ( ) , Y . items ( ) ) :
if i [ 0 ] != j [ 0 ] :
c = self . get_context ( )
msg = "X{0} and Y{1} are in a different order" . format ( red ( c . current_X_keys ) , green ( c . current_Y_keys ) )
return DeepExplanation ( msg )
return True
|
def _get_stddevs ( self , C , stddev_types , num_sites ) :
"""Return total standard deviation ."""
|
# standard deviation is converted from log10 to ln
std_total = np . log ( 10 ** C [ 'sigma' ] )
stddevs = [ ]
for _ in stddev_types :
stddevs . append ( np . zeros ( num_sites ) + std_total )
return stddevs
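The log10-to-ln conversion is just multiplication by ln(10); a one-line check (numpy assumed):
import numpy as np

sigma_log10 = 0.30
assert np.isclose(np.log(10 ** sigma_log10), sigma_log10 * np.log(10))   # both ~0.6908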
|
def get_jid ( jid ) :
'''Return the information returned when the specified job id was executed'''
|
with _get_serv ( ret = None , commit = True ) as cur :
sql = '''SELECT id, full_ret FROM salt_returns
WHERE jid = %s'''
cur . execute ( sql , ( jid , ) )
data = cur . fetchall ( )
ret = { }
if data :
for minion , full_ret in data :
ret [ minion ] = full_ret
return ret
|
def supervisor_command ( parser_args ) :
"""Supervisor - related commands"""
|
import logging
from synergy . supervisor . supervisor_configurator import SupervisorConfigurator , set_box_id
if parser_args . boxid :
set_box_id ( logging , parser_args . argument )
return
sc = SupervisorConfigurator ( )
if parser_args . reset :
sc . reset_db ( )
elif parser_args . start :
sc . mark_for_start ( parser_args . argument )
elif parser_args . stop :
sc . mark_for_stop ( parser_args . argument )
elif parser_args . query :
sc . query ( )
|
def split_into ( iterable , sizes ) :
"""Yield a list of sequential items from * iterable * of length ' n ' for each
integer ' n ' in * sizes * .
> > > list ( split _ into ( [ 1,2,3,4,5,6 ] , [ 1,2,3 ] ) )
[ [ 1 ] , [ 2 , 3 ] , [ 4 , 5 , 6 ] ]
If the sum of * sizes * is smaller than the length of * iterable * , then the
remaining items of * iterable * will not be returned .
> > > list ( split _ into ( [ 1,2,3,4,5,6 ] , [ 2,3 ] ) )
[ [ 1 , 2 ] , [ 3 , 4 , 5 ] ]
If the sum of * sizes * is larger than the length of * iterable * , fewer items
will be returned in the iteration that overruns * iterable * and further
lists will be empty :
> > > list ( split _ into ( [ 1,2,3,4 ] , [ 1,2,3,4 ] ) )
[ [ 1 ] , [ 2 , 3 ] , [ 4 ] , [ ] ]
When a ` ` None ` ` object is encountered in * sizes * , the returned list will
contain items up to the end of * iterable * the same way that itertools . islice
does :
> > > list ( split _ into ( [ 1,2,3,4,5,6,7,8,9,0 ] , [ 2,3 , None ] ) )
[ [ 1 , 2 ] , [ 3 , 4 , 5 ] , [ 6 , 7 , 8 , 9 , 0 ] ]
: func : ` split _ into ` can be useful for grouping a series of items where the
sizes of the groups are not uniform . An example would be where in a row
from a table , multiple columns represent elements of the same feature
( e . g . a point represented by x , y , z ) but , the format is not the same for
all columns ."""
|
# convert the iterable argument into an iterator so its contents can
# be consumed by islice in case it is a generator
it = iter ( iterable )
for size in sizes :
if size is None :
yield list ( it )
return
else :
yield list ( islice ( it , size ) )
|
def logistic ( x , a = 0. , b = 1. ) :
r"""Computes the logistic function with range : math : ` \ in ( a , b ) ` .
This is given by :
. . math : :
\ mathrm { logistic } ( x ; a , b ) = \ frac { a + b e ^ x } { 1 + e ^ x } .
Note that this is also the inverse of the logit function with domain
: math : ` ( a , b ) ` .
Parameters
x : float
The value to evaluate .
a : float , optional
The minimum bound of the range of the logistic function . Default
is 0.
b : float , optional
The maximum bound of the range of the logistic function . Default
is 1.
Returns
float
The logistic of x ."""
|
expx = numpy . exp ( x )
return ( a + b * expx ) / ( 1. + expx )
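A quick numeric check that the range is ( a , b ) and that the function inverts the logit on that interval; logit_ab below is a sketch consistent with the formula above, not taken from the surrounding code base:
import numpy

def logit_ab(p, a=0., b=1.):
    # inverse of the logistic above on (a, b)
    return numpy.log((p - a) / (b - p))

print(logistic(-30.0, a=2., b=5.))   # ~2.0, approaching the lower bound a
print(logistic(30.0, a=2., b=5.))    # ~5.0, approaching the upper bound b
p = logistic(0.7, a=2., b=5.)
print(numpy.isclose(logit_ab(p, a=2., b=5.), 0.7))   # True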
|
def connection_key ( self ) :
"""Return an index key used to cache the sampler connection ."""
|
return "{host}:{namespace}:{username}" . format ( host = self . host , namespace = self . namespace , username = self . username )
|
def _parse_kraken_output ( out_dir , db , data ) :
"""Parse kraken stat info comming from stderr ,
generating report with kraken - report"""
|
in_file = os . path . join ( out_dir , "kraken_out" )
stat_file = os . path . join ( out_dir , "kraken_stats" )
out_file = os . path . join ( out_dir , "kraken_summary" )
kraken_cmd = config_utils . get_program ( "kraken-report" , data [ "config" ] )
classify = unclassify = None
with open ( stat_file , 'r' ) as handle :
for line in handle :
if line . find ( " classified" ) > - 1 :
classify = line [ line . find ( "(" ) + 1 : line . find ( ")" ) ]
if line . find ( " unclassified" ) > - 1 :
unclassify = line [ line . find ( "(" ) + 1 : line . find ( ")" ) ]
if os . path . getsize ( in_file ) > 0 and not os . path . exists ( out_file ) :
with file_transaction ( data , out_file ) as tx_out_file :
cl = ( "{kraken_cmd} --db {db} {in_file} > {tx_out_file}" ) . format ( ** locals ( ) )
do . run ( cl , "kraken report: %s" % dd . get_sample_name ( data ) )
kraken = { "kraken_clas" : classify , "kraken_unclas" : unclassify }
kraken_sum = _summarize_kraken ( out_file )
kraken . update ( kraken_sum )
return kraken
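The percentage extraction is a plain slice between the parentheses; a standalone illustration with a made-up kraken stderr line:
line = "  9500 sequences classified (95.00%)"
print(line[line.find("(") + 1:line.find(")")])   # 95.00%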
|