signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def navigate(self, name, *args):
    """Navigate to a named route.

    :param name: Route name to dispatch to.
    :param args: Positional arguments forwarded to the route handler.
    :returns: Whatever the route handler returns.
    :raises Exception: If the route name is unknown, or registered but
        not callable.
    """
    if name not in self.routes:
        raise Exception("invalid route name '%s'" % name)
    elif callable(self.routes[name]):
        # Handlers receive the router instance as their first argument.
        return self.routes[name](self, *args)
    # Bug fix: the message was previously built as Exception('...%s...', name)
    # — the name was passed as a second argument and never formatted in.
    raise Exception('route %s not callable' % name)
|
def up_ec2(connection, region, instance_id, wait_for_ssh_available=True, log=False, timeout=600):
    """Boot an existing EC2 instance and wait until it is running.

    :param connection: boto EC2 connection used to start the instance.
    :param region: Region the instance lives in (kept for interface
        compatibility; not used directly here).
    :param instance_id: Id of the instance to start.
    :param wait_for_ssh_available: When True, block until SSH answers on
        the instance's IP address.
    :param log: When True, log the instance state while waiting.
    :param timeout: Approximate number of seconds to wait for the
        "running" state (polled every 10 seconds).
    """
    # boot the ec2 instance
    instance = connection.start_instances(instance_ids=instance_id)[0]
    instance.update()
    while instance.state != "running" and timeout > 1:
        # Bug fix: the state was previously logged unconditionally AND a
        # second time when ``log`` was set; now the flag is honored.
        if log:
            log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        timeout = timeout - 10
        instance.update()
    # and make sure we don't return until the instance is fully up
    if wait_for_ssh_available:
        wait_for_ssh(instance.ip_address)
|
def main():
    """This is a Toil pipeline used to perform alignment of fastqs.

    Handles the ``generate-config``, ``generate-manifest``, ``generate`` and
    ``run`` subcommands: the generate commands write editable config/manifest
    files into the current working directory, while ``run`` parses the config
    and manifest and launches the ADAM/GATK pipeline through Toil.
    """
    # Define Parser object and add to Toil
    if mock_mode():
        # Warn the user that the mock-mode env var changes pipeline behavior.
        usage_msg = 'You have the TOIL_SCRIPTS_MOCK_MODE environment variable set, so this pipeline ' 'will run in mock mode. To disable mock mode, set TOIL_SCRIPTS_MOCK_MODE=0'
    else:
        usage_msg = None
    parser = argparse.ArgumentParser(usage=usage_msg)
    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
    subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
    subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
    # Run subparser
    parser_run = subparsers.add_parser('run', help='Runs the ADAM/GATK pipeline')
    # Mock mode uses separate default config/manifest file names.
    default_config = 'adam-gatk-mock.config' if mock_mode() else 'adam-gatk.config'
    default_manifest = 'adam-gatk-mock-manifest.csv' if mock_mode() else 'adam-gatk-manifest.csv'
    parser_run.add_argument('--config', default=default_config, type=str, help='Path to the (filled in) config file, generated with "generate-config".')
    parser_run.add_argument('--manifest', default=default_manifest, type=str, help='Path to the (filled in) manifest file, generated with "generate-manifest". ' '\nDefault value: "%(default)s".')
    Job.Runner.addToilOptions(parser_run)
    args = parser.parse_args()
    cwd = os.getcwd()
    # 'generate' writes both files; note the elif below chains off the second
    # if, so 'run' is still reached when neither generate branch matches.
    if args.command == 'generate-config' or args.command == 'generate':
        generate_file(os.path.join(cwd, default_config), generate_config)
    if args.command == 'generate-manifest' or args.command == 'generate':
        generate_file(os.path.join(cwd, default_manifest), generate_manifest)
    # Pipeline execution
    elif args.command == 'run':
        require(os.path.exists(args.config), '{} not found. Please run ' 'generate-config'.format(args.config))
        if not hasattr(args, 'sample'):
            require(os.path.exists(args.manifest), '{} not found and no samples provided. Please ' 'run "generate-manifest"'.format(args.manifest))
        # Parse config
        # NOTE(review): yaml.load without an explicit Loader and dict.iteritems
        # are Python 2 era and assume a trusted config file — confirm.
        parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
        inputs = argparse.Namespace(**parsed_config)
        # Parse manifest file
        uuid_list = []
        with open(args.manifest) as f_manifest:
            for line in f_manifest:
                # Skip blank lines and '#' comments.
                if not line.isspace() and not line.startswith('#'):
                    uuid_list.append(line.strip())
        inputs.sort = False
        if not inputs.dir_suffix:
            inputs.dir_suffix = ''
        if not inputs.s3_bucket:
            inputs.s3_bucket = ''
        # master_ip and num_nodes are mutually exclusive ways of obtaining a
        # Spark cluster; having both set is an error.
        if inputs.master_ip and inputs.num_nodes:
            raise ValueError("Exactly one of master_ip (%s) and num_nodes (%d) must be provided." % (inputs.master_ip, inputs.num_nodes))
        if not hasattr(inputs, 'master_ip') and inputs.num_nodes <= 1:
            # One node is reserved for the Spark/HDFS master.
            raise ValueError('num_nodes allocates one Spark/HDFS master and n-1 workers, and thus must be greater ' 'than 1. %d was passed.' % inputs.num_nodes)
        if (inputs.pipeline_to_run != "adam" and inputs.pipeline_to_run != "gatk" and inputs.pipeline_to_run != "both"):
            raise ValueError("pipeline_to_run must be either 'adam', 'gatk', or 'both'. %s was passed." % inputs.pipeline_to_run)
        Job.Runner.startToil(Job.wrapJobFn(sample_loop, uuid_list, inputs), args)
|
def get_cardinality(self, node=None):
    """Return the cardinality of ``node``, or of every variable.

    Throws an error if the CPD for the queried node hasn't been added
    to the network.

    Parameters
    ----------
    node : any hashable python object (optional)
        The node whose cardinality we want. If not given, a
        ``defaultdict(int)`` mapping every variable to its cardinality
        is returned instead.

    Returns
    -------
    int or defaultdict
        The cardinality of ``node``, or a variable -> cardinality map.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
    >>> cpd_diff = TabularCPD('diff', 2, [[0.6, 0.4]])
    >>> cpd_intel = TabularCPD('intel', 2, [[0.7, 0.3]])
    >>> cpd_grade = TabularCPD('grade', 2, [[0.1, 0.9, 0.2, 0.7],
    ...                                     [0.9, 0.1, 0.8, 0.3]],
    ...                        ['intel', 'diff'], [2, 2])
    >>> student.add_cpds(cpd_diff, cpd_intel, cpd_grade)
    >>> student.get_cardinality()
    defaultdict(int, {'diff': 2, 'grade': 2, 'intel': 2})
    >>> student.get_cardinality('intel')
    """
    if node:
        return self.get_cpds(node).cardinality[0]
    cardinalities = defaultdict(int)
    cardinalities.update({cpd.variable: cpd.cardinality[0] for cpd in self.cpds})
    return cardinalities
|
def getLinkedAnalyses(self):
    """Look up the Analyses linked to this attachment.

    :returns: sorted list of ANs, where the latest AN comes first
    """
    # UIDs of the Analyses referencing this attachment
    uids = get_backreferences(self, "AnalysisAttachment")
    # Resolve each UID, dropping any that no longer exist (None results).
    analyses = [api.get_object_by_uid(uid, None) for uid in uids]
    analyses = [an for an in analyses if an]
    # Sort by physical path (reversed) so attachments coming from an AR with
    # a higher "-Rn" suffix sort first; the created date is identical here,
    # so it cannot be used as the sort key.
    return sorted(analyses, key=api.get_path, reverse=True)
|
def center_land(world):
    """Translate the map horizontally and vertically to put as much ocean as
    possible at the borders. It operates on elevation and plates map."""
    # Sum each row of the elevation grid (1 == sum along x-axis); the row
    # with the smallest total is treated as the most oceanic one.
    y_sums = world.layers['elevation'].data.sum(1)
    y_with_min_sum = y_sums.argmin()
    if get_verbose():
        print("geo.center_land: height complete")
    # Sum each column (0 == sum along y-axis) for the horizontal shift.
    x_sums = world.layers['elevation'].data.sum(0)
    x_with_min_sum = x_sums.argmin()
    if get_verbose():
        print("geo.center_land: width complete")
    # Optional fixed latitude offset; currently disabled (0).
    latshift = 0
    # Roll both layers so that the minimum-sum row/column lands on the border;
    # both maps must shift identically to stay aligned.
    world.layers['elevation'].data = numpy.roll(numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0), -x_with_min_sum, axis=1)
    world.layers['plates'].data = numpy.roll(numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0), -x_with_min_sum, axis=1)
    if get_verbose():
        # NOTE(review): prints "width complete" a second time; presumably it
        # was meant to say centering is complete — confirm before changing.
        print("geo.center_land: width complete")
|
def _get_token():
    '''Get an auth token'''
    # Credentials come from the master/minion config under 'rallydev'.
    creds = __opts__.get('rallydev', {})
    username = creds.get('username', None)
    password = creds.get('password', None)
    path = 'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'
    result = salt.utils.http.query(
        path,
        decode=True,
        decode_type='json',
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    # No decoded body means authorization failed.
    if 'dict' not in result:
        return None
    return result['dict']['OperationResult']['SecurityToken']
|
def get_docargs(self, args=None, prt=None):
    """Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
    # docargs = self.objdoc.get_docargs(args, exp_letters=set(['o', 't', 'p', 'c']))
    parsed = self.objdoc.get_docargs(args, prt)
    # Sanity-check the parsed docopt options before returning them.
    self._chk_docopts(parsed)
    return parsed
|
def clean_query_Dict(cls, query_Dict):
    """Remove entries with falsy values (e.g. None) from the dict."""
    cleaned = {}
    for key, value in query_Dict.items():
        if value:
            cleaned[key] = value
    return cleaned
|
def set_data(self, index, data):
    """Replace the complete data of a single line strip.

    Parameters
    ----------
    index : int
        The index of the line strip to be replaced.
    data : array-like
        The data to assign to the selected line strip.
    """
    # Overwrite the strip's row in the position texture, then redraw.
    self._pos_tex[index, :] = data
    self.update()
|
def remove_user_from_group(self, username, groupname):
    """Remove a user from a group.

    :param username: The user to remove from the group.
    :param groupname: The group that the user will be removed from.
    """
    url = self._options['server'] + '/rest/api/latest/group/user'
    params = {'groupname': groupname, 'username': username}
    # The REST endpoint identifies both user and group via query params.
    self._session.delete(url, params=params)
    return True
|
def assign_issue(issue_key, assignee, server=None, username=None, password=None):
    '''Assign the issue to an existing user. Return ``True`` when the issue has
    been properly assigned.

    issue_key
        The JIRA ID of the ticket to manipulate.
    assignee
        The name of the user to assign the ticket to.

    CLI Example:
    salt '*' jira.assign_issue NET-123 example_user'''
    client = _get_jira(server=server, username=username, password=password)
    return client.assign_issue(issue_key, assignee)
|
def _compute_bgid(self, bg=None):
    """Return a unique identifier for the background data.

    Accepts a single ``qpimage.QPImage``, a list of QPImages, or a
    ``SeriesData`` whose length is 1 or matches this series.

    :raises ValueError: if ``bg`` is of an unsupported type/shape
    """
    if bg is None:
        # Default to the stored background data.
        bg = self._bgdata
    if isinstance(bg, qpimage.QPImage):  # Single QPImage
        if "identifier" in bg:
            return bg["identifier"]
        else:
            # No explicit identifier: hash amplitude, phase and all metadata
            # key/value pairs (keys sorted for a deterministic hash).
            data = [bg.amp, bg.pha]
            for key in sorted(list(bg.meta.keys())):
                val = bg.meta[key]
                data.append("{}={}".format(key, val))
            return hash_obj(data)
    elif (isinstance(bg, list) and isinstance(bg[0], qpimage.QPImage)):  # List of QPImage
        # Combine the per-image identifiers (recursive call) into one hash.
        data = []
        for bgii in bg:
            data.append(self._compute_bgid(bgii))
        return hash_obj(data)
    elif (isinstance(bg, SeriesData) and (len(bg) == 1 or len(bg) == len(self))):  # DataSet
        # A whole series carries its own identifier.
        return bg.identifier
    else:
        raise ValueError("Unknown background data type: {}".format(bg))
|
def get_combined_size(tiles):
    """Calculate combined pixel size of all tiles."""
    # TODO: Refactor calculating layout to avoid repetition.
    columns, rows = calc_columns_rows(len(tiles))
    # All tiles share the size of the first tile's image.
    width, height = tiles[0].image.size
    return (width * columns, height * rows)
|
def calculate_mrcas(self, c1: ClassId, c2: ClassId) -> Set[ClassId]:
    """Calculate the MRCA set for a class pair."""
    graph = self.G
    # Reflexive ancestor sets: each class counts as its own ancestor.
    common_ancestors = (self._ancestors(c1) | {c1}) & (self._ancestors(c2) | {c2})
    # Any common ancestor that is itself an ancestor of another common
    # ancestor is redundant (not minimal).
    redundant = set()
    for anc in common_ancestors:
        redundant |= nx.ancestors(graph, anc)
    return common_ancestors - redundant
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values."""
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS[imt]
    imean = self._get_mean(C, rup, dists, sites)
    # NOTE(review): substring membership — this relies on imt.name being
    # exactly "SA", "PGA" or "PGV"; other names that happen to be substrings
    # of "SA PGA" would also match. Confirm upstream guarantees.
    if imt.name in "SA PGA":  # Convert units to g,
        # but only for PGA and SA (not PGV):
        mean = np.log((10.0 ** (imean - 2.0)) / g)
    else:  # PGV:
        mean = np.log(10.0 ** imean)
    # Standard deviations are computed in log10 units and converted to
    # natural-log units to match the mean.
    istddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
    stddevs = np.log(10.0 ** np.array(istddevs))
    return mean + self.adjustment_factor, stddevs
|
def get_metric(self, timestamp):
    """Get a metric including all current time series.

    Builds a :class:`opencensus.metrics.export.metric.Metric` with one
    :class:`opencensus.metrics.export.time_series.TimeSeries` per set of
    label values that has a recorded measurement; each series holds a
    single point with the last recorded value.

    :type timestamp: :class:`datetime.datetime`
    :param timestamp: Recording time to report, usually the current time.
    :rtype: :class:`opencensus.metrics.export.metric.Metric` or None
    :return: A converted metric for all current measurements.
    """
    # Nothing recorded yet: no metric to report.
    if not self.points:
        return None
    # Hold the lock while reading points and assembling the metric.
    with self._points_lock:
        ts_list = get_timeseries_list(self.points, timestamp)
        return metric.Metric(self.descriptor, ts_list)
|
def r_get_numbers(matchgroup, num):
    """Extract ``num`` floats from a regex match iterator.

    A helper that can be used similarly to ``fscanf(fid, '%f', num)``:
    it consumes the next ``num`` matches from ``matchgroup``.

    :param matchgroup: iterator of regex match objects (e.g. from
        ``re.finditer``).
    :param num: number of matches to consume.
    :returns: ``np.ndarray`` of the parsed floats.
    :raises StopIteration: if the iterator holds fewer than ``num`` matches.
    """
    # Bug fix: use builtin next() instead of matchgroup.next(), which is
    # Python 2 only; next() works on both Python 2.6+ and 3.
    res = [float(next(matchgroup).group()) for _ in range(num)]
    return np.array(res)
|
def _initialize_context(self, trace_header):
    """Create a facade segment based on environment variables
    set by AWS Lambda and initialize storage for subsegments."""
    if not global_sdk_config.sdk_enabled():
        # Force subsequent subsegments to be disabled and turned into
        # DummySegments.
        sampled = False
    elif trace_header.sampled == 0:
        sampled = False
    elif trace_header.sampled == 1:
        sampled = True
    else:
        # Unknown sampling decision: leave it undetermined.
        sampled = None
    segment = FacadeSegment(
        name='facade',
        traceid=trace_header.root,
        entityid=trace_header.parent,
        sampled=sampled,
    )
    self._local.segment = segment
    self._local.entities = []
|
def multiply_adjacent_elements(input_list):
    """Multiply each pair of neighbouring elements in a list.

    Args:
        input_list: The list whose consecutive elements are to be multiplied.

    Returns:
        A list of the products of neighbouring elements in the input list.

    Examples:
        >>> multiply_adjacent_elements([1, 1, 3, 4, 4, 5, 6, 7])
        [1, 3, 12, 16, 20, 30, 42]
        >>> multiply_adjacent_elements([4, 5, 8, 9, 6, 10])
        [20, 40, 72, 54, 60]
        >>> multiply_adjacent_elements([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        [2, 6, 12, 20, 30, 42, 56, 72, 90]
    """
    # Pair each element with its successor and multiply the pairs.
    return [a * b for a, b in zip(input_list, input_list[1:])]
|
def parse_link(self, node):
    """Parse a <Link> element.

    @param node: Node containing the <Link> element
    @type node: xml.etree.Element
    """
    # EAFP: fetch required attributes, reporting an error when missing.
    try:
        name = node.lattrib['name']
    except KeyError:
        self.raise_error('<Link> must specify a name')
    try:
        type_ = node.lattrib['type']
    except KeyError:
        self.raise_error("Link '{0}' must specify a type", name)
    # Description is optional and defaults to an empty string.
    description = node.lattrib.get('description', '')
    self.current_component_type.add_link(Link(name, type_, description))
|
def get_wallet_height(self, id=None, endpoint=None):
    """Get the current wallet index height.

    Args:
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the RPC call
    """
    # Thin delegation to the RPC plumbing.
    response = self._call_endpoint(GET_WALLET_HEIGHT, id=id, endpoint=endpoint)
    return response
|
def voronoi_partition(G, outline):
    """For 2D-embedded graph `G`, within the boundary given by the shapely
    polygon `outline`, returns `G` with the Voronoi cell region as an
    additional node attribute."""
    # following line from vresutils.graph caused a bug
    # G = polygon_subgraph(G, outline, copy=False)
    positions = vresutils.graph.get_node_attributes(G, 'pos')
    regions = vresutils.graph.voronoi_partition_pts(
        list(positions.values()), outline, no_multipolygons=True)
    # NOTE(review): the (G, name, values) argument order matches networkx 1.x;
    # networkx >= 2.0 expects set_node_attributes(G, values, name) — confirm
    # the pinned networkx version.
    nx.set_node_attributes(G, 'region', dict(zip(G.nodes(), regions)))
    return G
|
def add_to_manifest ( self , manifest ) :
"""Add useful details to the manifest about this service
so that it can be used in an application .
: param manifest : An predix . admin . app . Manifest object
instance that manages reading / writing manifest config
for a cloud foundry app ."""
|
# Add this service to list of services
manifest . add_service ( self . service . name )
# Add environment variables
manifest . add_env_var ( self . __module__ + '.uri' , self . service . settings . data [ 'url' ] )
manifest . add_env_var ( self . __module__ + '.zone_id' , self . get_predix_zone_id ( ) )
manifest . write_manifest ( )
|
def set_address(self, host, port):
    """Record the host and port this object points at."""
    self.host, self.port = host, port
|
def _req_fix ( self , line ) :
"""Fix slacky and salix requirements because many dependencies splitting
with " , " and others with " | " """
|
deps = [ ]
for dep in line [ 18 : ] . strip ( ) . split ( "," ) :
dep = dep . split ( "|" )
if self . repo == "slacky" :
if len ( dep ) > 1 :
for d in dep :
deps . append ( d . split ( ) [ 0 ] )
dep = "" . join ( dep )
deps . append ( dep . split ( ) [ 0 ] )
else :
if len ( dep ) > 1 :
for d in dep :
deps . append ( d )
deps . append ( dep [ 0 ] )
return deps
|
def fetch_file(dataset_name, url, dataset_dir, dataset_prefix=None,
               default_paths=None, filetype=None, resume=True, overwrite=False,
               md5sum=None, username=None, password=None, retry=0, verbose=1,
               temp_downloads=None):
    """Load requested file, downloading it if needed or requested.

    :param str dataset_name: name used to locate/cache the dataset
    :param str url: contains the url of the file to be downloaded
    :param str dataset_dir: path of the data directory. Used for data
        storage in the specified location.
    :param str dataset_prefix: optional prefix forwarded to the dataset lookup
    :param default_paths: optional fallback paths for the dataset lookup
    :param str filetype: extension override; inferred from the URL when None
    :param bool resume: if true, try to resume partially downloaded files
    :param overwrite: if bool true and file already exists, delete it.
    :param str md5sum: MD5 sum of the file. Checked if download of the file
        is required
    :param str username: username used for basic HTTP authentication
    :param str password: password used for basic HTTP authentication
    :param int retry: current retry attempt, bounded by ``MAX_RETRIES``
    :param int verbose: verbosity level (0 means no message).
    :param temp_downloads: directory for partial downloads (defaults to the
        niworkflows cache)
    :returns: absolute path of downloaded file
    :rtype: str

    .. note::
        If, for any reason, the download procedure fails, all downloaded files
        are removed.
    """
    final_path, cached = _get_dataset(
        dataset_name, dataset_prefix=dataset_prefix, data_dir=dataset_dir,
        default_paths=default_paths, verbose=verbose)
    if cached and not overwrite:
        return final_path

    data_dir = final_path.parent
    if temp_downloads is None:
        temp_downloads = NIWORKFLOWS_CACHE_DIR / 'downloads'
    temp_downloads = Path(temp_downloads)
    temp_downloads.mkdir(parents=True, exist_ok=True)

    # Determine filename using URL
    parse = urlparse(url)
    file_name = op.basename(parse.path)
    if file_name == '':
        file_name = _md5_hash(parse.path)
    if filetype is not None:
        file_name += filetype

    temp_full_path = temp_downloads / file_name
    temp_part_path = temp_full_path.with_name(file_name + '.part')

    if overwrite:
        shutil.rmtree(str(dataset_dir), ignore_errors=True)
    if temp_full_path.exists():
        temp_full_path.unlink()

    t_0 = time.time()
    initial_size = 0

    # Build the request; keep the connection alive for large transfers.
    request = Request(url)
    request.add_header('Connection', 'Keep-Alive')
    if username is not None and password is not None:
        if not url.startswith('https'):
            raise ValueError(
                'Authentication was requested on a non secured URL ({0!s}).'
                'Request has been blocked for security reasons.'.format(url))
        # Note: HTTPBasicAuthHandler is not fitted here because it relies
        # on the fact that the server will return a 401 error with proper
        # www-authentication header, which is not the case of most
        # servers.
        encoded_auth = base64.b64encode((username + ':' + password).encode())
        request.add_header(b'Authorization', b'Basic ' + encoded_auth)

    if verbose > 0:
        displayed_url = url.split('?')[0] if verbose == 1 else url
        NIWORKFLOWS_LOG.info('Downloading data from %s ...', displayed_url)

    if resume and temp_part_path.exists():
        # Download has been interrupted: try to resume it.
        local_file_size = temp_part_path.stat().st_size
        # If the file exists, then only download the remainder
        request.add_header("Range", "bytes={}-".format(local_file_size))
        try:
            data = urlopen(request)
            content_range = data.info().get('Content-Range')
            if (content_range is None or not content_range.startswith(
                    'bytes {}-'.format(local_file_size))):
                raise IOError('Server does not support resuming')
        except Exception:
            # A wide number of errors can be raised here (HTTPError,
            # URLError...); catch them all and rerun without resuming.
            if verbose > 0:
                NIWORKFLOWS_LOG.warning(
                    'Resuming failed, try to download the whole file.')
            # Bug fix: the rerun previously dropped dataset_prefix,
            # default_paths, filetype, temp_downloads and retry, which
            # changed lookup/extraction behavior on the second attempt.
            return fetch_file(
                dataset_name, url, dataset_dir, dataset_prefix=dataset_prefix,
                default_paths=default_paths, filetype=filetype, resume=False,
                overwrite=overwrite, md5sum=md5sum, username=username,
                password=password, retry=retry, verbose=verbose,
                temp_downloads=temp_downloads)
        initial_size = local_file_size
        mode = 'ab'
    else:
        try:
            data = urlopen(request)
        except (HTTPError, URLError):
            if retry < MAX_RETRIES:
                if verbose > 0:
                    NIWORKFLOWS_LOG.warning(
                        'Download failed, retrying (attempt %d)', retry + 1)
                time.sleep(5)
                # Bug fix: forward all lookup/naming parameters so a retry
                # behaves exactly like the original call.
                return fetch_file(
                    dataset_name, url, dataset_dir,
                    dataset_prefix=dataset_prefix,
                    default_paths=default_paths, filetype=filetype,
                    resume=False, overwrite=overwrite, md5sum=md5sum,
                    username=username, password=password, verbose=verbose,
                    retry=retry + 1, temp_downloads=temp_downloads)
            raise
        mode = 'wb'

    # Stream the payload into the .part file, then atomically rename it.
    with temp_part_path.open(mode) as local_file:
        _chunk_read_(data, local_file, report_hook=(verbose > 0),
                     initial_size=initial_size, verbose=verbose)
    temp_part_path.replace(temp_full_path)

    delta_t = time.time() - t_0
    if verbose > 0:
        # Complete the reporting hook
        sys.stderr.write(
            ' ...done. ({0:.0f} seconds, {1:.0f} min)\n'.format(
                delta_t, delta_t // 60))

    if md5sum is not None:
        if _md5_sum_file(temp_full_path) != md5sum:
            raise ValueError("File {!s} checksum verification has failed."
                             " Dataset fetching aborted.".format(temp_full_path))

    # Infer the extension (handling double extensions such as .tar.gz).
    if filetype is None:
        fname, filetype = op.splitext(temp_full_path.name)
        if filetype == '.gz':
            fname, ext = op.splitext(fname)
            filetype = ext + filetype
    if filetype.startswith('.'):
        filetype = filetype[1:]

    if filetype.startswith('tar'):
        args = 'xf' if not filetype.endswith('gz') else 'xzf'
        sp.check_call(['tar', args, str(temp_full_path)], cwd=data_dir)
        temp_full_path.unlink()
        return final_path
    if filetype == 'zip':
        import zipfile
        sys.stderr.write('Unzipping package (%s) to data path (%s)...' % (
            temp_full_path, data_dir))
        with zipfile.ZipFile(str(temp_full_path), 'r') as zip_ref:
            zip_ref.extractall(data_dir)
        sys.stderr.write('done.\n')
        return final_path
    return final_path
|
def reindex(self, indexers=None, method=None, tolerance=None, copy=True, **indexers_kwargs):
    """Conform this object onto a new set of indexes, filling in missing
    values with NaN.

    Parameters
    ----------
    indexers : dict, optional
        Keys are dimension names, values are arrays of coordinate tick
        labels. Mis-matched coordinate values become NaN; mis-matched
        dimension names are simply ignored. One of ``indexers`` or
        ``indexers_kwargs`` must be provided.
    method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
        How to fill index values in ``indexers`` not found on this data
        array: None (default) leaves gaps; pad/ffill propagates the last
        valid value forward; backfill/bfill propagates the next valid
        value backward; nearest uses the nearest valid value (requires
        pandas >= 0.16).
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches: ``abs(index[indexer] - target) <= tolerance``.
    copy : bool, optional
        With ``copy=True`` the returned data is always copied; with
        ``copy=False`` the output may share memory with the input when
        reindexing is unnecessary or slice-only. A new xarray object is
        returned either way.
    **indexers_kwargs : {dim: indexer, ...}, optional
        Keyword form of ``indexers``.

    Returns
    -------
    reindexed : DataArray
        Another dataset array with this array's data but replaced
        coordinates.

    See Also
    --------
    DataArray.reindex_like
    align
    """
    indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'reindex')
    # Delegate to the Dataset implementation via a temporary dataset.
    reindexed = self._to_temp_dataset().reindex(
        indexers=indexers, method=method, tolerance=tolerance, copy=copy)
    return self._from_temp_dataset(reindexed)
|
def getHostsFromFile(filename):
    """Parse a hosts file into a list of ``(hostname, workers)`` tuples.

    Each line holds a hostname optionally followed by a worker count.
    Lines containing brackets are treated as SLURM-style groupings and
    expanded via ``parseSLURM``.

    :param filename: path of the hosts file to read.
    :returns: list of ``(hostname, int_workers)`` tuples; a missing count
        defaults to 0 (auto-assign based on CPU count downstream).
    """
    # Bug fix: use raw strings — '[\[\]]' and similar are invalid escape
    # sequences in plain string literals on Python 3.
    # Hostname: everything up to whitespace, '/', '=', or end of line.
    hostname_re = re.compile(r"^[^ /\t=\n]+")
    worker_re = re.compile(r"\d+")
    # SLURM groupings contain brackets, e.g. "node[01-04]"; hoisted out of
    # the loop instead of re-searching with a string pattern per line.
    slurm_re = re.compile(r"[\[\]]")
    hosts = []
    with open(filename) as f:
        for line in f:
            # Check to see if it is a SLURM grouping instead of a
            # regular list of hosts.
            if slurm_re.search(line):
                hosts = hosts + parseSLURM(line.strip())
            else:
                host = hostname_re.search(line.strip())
                if host:
                    hostname = host.group()
                    n = worker_re.search(line[host.end():])
                    if n:
                        n = n.group()
                    else:
                        # Automatically assign based on CPU count
                        n = 0
                    hosts.append((hostname, int(n)))
    return hosts
|
def connect_edges(graph):
    """Given a Graph element containing abstract edges compute edge
    segments directly connecting the source and target nodes. This
    operation just uses internal HoloViews operations and will be a
    lot slower than the pandas equivalent.

    :param graph: HoloViews Graph element whose kdims are (source, target)
    :returns: list of 2x2 numpy arrays, one [start_xy, end_xy] per edge
    :raises ValueError: if a node position is missing for any edge
    """
    paths = []
    for start, end in graph.array(graph.kdims):
        # Select the node rows matching the edge endpoints.
        start_ds = graph.nodes[:, :, start]
        end_ds = graph.nodes[:, :, end]
        if not len(start_ds) or not len(end_ds):
            raise ValueError('Could not find node positions for all edges')
        # The first two key dimensions hold the node x/y coordinates.
        start = start_ds.array(start_ds.kdims[:2])
        end = end_ds.array(end_ds.kdims[:2])
        paths.append(np.array([start[0], end[0]]))
    return paths
|
def worker_task(work_item, config):
    """The celery task which performs a single mutation and runs a test suite.
    This runs `cosmic-ray worker` in a subprocess and returns the results,
    passing `config` to it via stdin.

    Args:
        work_item: A dict describing a WorkItem.
        config: The configuration to use for the test execution.

    Returns: A ``(job_id, result)`` tuple for the executed work item.
    """
    # ``global`` documents the shared workspace; this function never assigns
    # _workspace itself (``_ensure_workspace`` manages it).
    global _workspace
    _ensure_workspace(config)
    # Execute the mutation + test suite for this work item.
    result = worker(work_item.module_path, config.python_version, work_item.operator_name, work_item.occurrence, config.test_command, config.timeout)
    return work_item.job_id, result
|
def cover(ctx, html=False):
    '''Run tests suite with coverage'''
    header('Run tests suite with coverage')
    cmd = 'pytest --cov udata --cov-report term'
    if html:
        # Additionally emit an HTML coverage report.
        cmd += ' --cov-report html:reports/python/cover'
    with ctx.cd(ROOT):
        ctx.run(cmd, pty=True)
|
def extract_file_from_zip(bytes_io, expected_file):
    """Extract ``expected_file`` from a zip held in ``bytes_io``. Returns bytes."""
    with zipfile.ZipFile(bytes_io) as archive:
        return archive.read(expected_file)
|
def cap(self):
    """"Caps" the construction of the pipeline, signifying that no more
    inputs and outputs are expected to be added and therefore the input and
    output nodes can be created along with the provenance."""
    state = (self._inputnodes, self._outputnodes, self._prov)
    if state == (None, None, None):
        # First cap: build all input/output nodes and record provenance.
        self._inputnodes = {
            freq: self._make_inputnode(freq) for freq in self.input_frequencies}
        self._outputnodes = {
            freq: self._make_outputnode(freq) for freq in self.output_frequencies}
        self._prov = self._gen_prov()
    elif None in state:
        # Partially-capped state is inconsistent.
        raise ArcanaError(
            "If one of _inputnodes, _outputnodes or _prov is not None then"
            " they all should be in {}".format(self))
|
def vqa_recurrent_self_attention_base():
    """VQA attention baseline hparams."""
    hparams = universal_transformer.universal_transformer_base()
    # Batching
    hparams.batch_size = 1024
    hparams.use_fixed_batch_size = True
    # Disable weight decay and gradient clipping
    hparams.weight_decay = 0.
    hparams.clip_grad_norm = 0.
    # Learning-rate schedule (the default initializer, e.g. "xavier",
    # is left untouched)
    hparams.learning_rate_schedule = (
        "constant*linear_warmup*rsqrt_normalized_decay")
    hparams.learning_rate_warmup_steps = 8000
    hparams.learning_rate_constant = 7e-4
    hparams.learning_rate_decay_rate = 0.5
    hparams.learning_rate_decay_steps = 50000
    # hparams.dropout = 0.5
    # Summaries
    hparams.summarize_grads = True
    hparams.summarize_vars = True
    # Not used hparams
    hparams.label_smoothing = 0.1
    hparams.multiply_embedding_mode = "sqrt_depth"
    # New image-related hparams (use raw image as input)
    for hp_name, hp_value in (("image_input_type", "feature"),
                              ("image_model_fn", "resnet_v1_152"),
                              ("resize_side", 512),
                              ("height", 448),
                              ("width", 448),
                              ("distort", True),
                              ("train_resnet", False)):
        hparams.add_hparam(hp_name, hp_value)
    # Question hidden size and self-attention settings keep the
    # universal-transformer defaults (see commented history in VCS).
    # Iterative part
    hparams.transformer_ffn_type = "fc"
    return hparams
|
def write_languages(f, l):
    """Write language information as a Python dict literal."""
    # Assemble all output lines first, then write them in one call.
    lines = ["Languages = {%s" % os.linesep]
    lines.extend(" %r: %r,%s" % (lang, l[lang], os.linesep) for lang in sorted(l))
    lines.append("}%s" % os.linesep)
    f.writelines(lines)
|
def _parse ( jsonOutput ) :
'''Parses JSON response from Tika REST API server
: param jsonOutput : JSON output from Tika Server
: return : a dictionary having ' metadata ' and ' content ' values'''
|
parsed = { }
if not jsonOutput :
return parsed
parsed [ "status" ] = jsonOutput [ 0 ]
if jsonOutput [ 1 ] == None or jsonOutput [ 1 ] == "" :
return parsed
realJson = json . loads ( jsonOutput [ 1 ] )
content = ""
for js in realJson :
if "X-TIKA:content" in js :
content += js [ "X-TIKA:content" ]
if content == "" :
content = None
parsed [ "content" ] = content
parsed [ "metadata" ] = { }
for js in realJson :
for n in js :
if n != "X-TIKA:content" :
if n in parsed [ "metadata" ] :
if not isinstance ( parsed [ "metadata" ] [ n ] , list ) :
parsed [ "metadata" ] [ n ] = [ parsed [ "metadata" ] [ n ] ]
parsed [ "metadata" ] [ n ] . append ( js [ n ] )
else :
parsed [ "metadata" ] [ n ] = js [ n ]
return parsed
|
def list_nodes_select(call=None):
    '''Return a list of the VMs that are on the provider, with select fields'''
    # This function only supports -f/--function invocation.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_select function must be called with -f or --function.'
        )
    selection = __opts__.get('query.selection')
    if not selection:
        raise SaltCloudSystemExit('query.selection not found in /etc/salt/cloud')
    # TODO: somewhat doubt the implementation of cloud.list_nodes_select
    return salt.utils.cloud.list_nodes_select(list_nodes_full(), selection, call)
|
def full_value(self):
    """Returns the full value with the path also (ie, name=value (path))

    :returns: String
    """
    # Concatenate the name/value part, the path part, and a trailing
    # blank line, exactly in that order.
    return "".join([self.name_value(), self.path_value(), "\n\n"])
|
def store_array_elements(self, array, start_idx, data):
    """Stores either a single element or a range of elements in the array.

    :param array:     Reference to the array.
    :param start_idx: Starting index for the store (may be a symbolic value).
    :param data:      Either a single value or a list of values.
    """
    # we process data as a list of elements
    # => if there is only a single element, wrap it in a list
    data = data if isinstance(data, list) else [data]
    # concretize start index; yields one or more candidate concrete indexes
    concrete_start_idxes = self.concretize_store_idx(start_idx)
    if len(concrete_start_idxes) == 1:
        # only one start index
        # => concrete store
        concrete_start_idx = concrete_start_idxes[0]
        for i, value in enumerate(data):
            self._store_array_element_on_heap(array=array, idx=concrete_start_idx + i, value=value, value_type=array.element_type)
        # if the index was symbolic before concretization, this
        # constrains it to the concrete start idx
        self.state.solver.add(concrete_start_idx == start_idx)
    else:
        # multiple start indexes
        # => symbolic store
        start_idx_options = []
        for concrete_start_idx in concrete_start_idxes:
            start_idx_options.append(concrete_start_idx == start_idx)
            # we store elements conditioned with the start index:
            # => if concrete_start_idx == start_idx
            #    then store the value
            #    else keep the current value
            for i, value in enumerate(data):
                self._store_array_element_on_heap(array=array, idx=concrete_start_idx + i, value=value, value_type=array.element_type, store_condition=start_idx_options[-1])
        # constrain start_idx, s.t. it evals to one of the concretized indexes
        constraint_on_start_idx = self.state.solver.Or(*start_idx_options)
        self.state.add_constraints(constraint_on_start_idx)
|
def get_editor_buffer_for_location(self, location):
    """Return the `EditorBuffer` for this location.

    When this file was not yet loaded, return None.
    """
    # First buffer whose location matches, or None when absent.
    matches = (buf for buf in self.editor_buffers if buf.location == location)
    return next(matches, None)
|
def parse_resource_extended(self, session, resource_name):
    """Parse a resource string to get extended interface information.

    Corresponds to viParseRsrcEx function of the VISA library.

    :param session: Resource Manager session (should always be the Default
        Resource Manager for VISA returned from open_default_resource_manager()).
    :param resource_name: Unique symbolic name of a resource.
    :return: Resource information, return value of the library call.
    :rtype: :class:`pyvisa.highlevel.ResourceInfo`,
        :class:`pyvisa.constants.StatusCode`
    """
    try:
        parsed = rname.parse_resource_name(resource_name)
        info = ResourceInfo(parsed.interface_type_const,
                            parsed.board,
                            parsed.resource_class,
                            str(parsed),
                            None)
    except ValueError:
        # Name did not parse as a valid VISA resource string.
        return 0, constants.StatusCode.error_invalid_resource_name
    return info, constants.StatusCode.success
|
def select_gist(self, allow_none=False):
    """Given the requested filename, it selects the proper gist; if more than
    one gist is found with the given filename, user is asked to choose.

    :allow_none: (bool) for `getgist` it should raise error if no gist is
        found, but setting this argument to True avoid this error, which is
        useful when `putgist` is calling this method
    :return: (dict) selected gist
    """
    # Collect every gist containing a file with the requested name.
    candidates = []
    for gist in self.get_gists():
        for entry in gist.get("files"):
            if entry.get("filename") == self.filename:
                candidates.append(gist)
    # No match: either bail out quietly or report the failure.
    if not candidates:
        if allow_none:
            return None
        msg = "No file named `{}` found in {}'s gists"
        self.oops(msg.format(self.file_path, self.user))
        if not self.is_authenticated:
            self.warn("To access private gists set the GETGIST_TOKEN")
            self.warn("(see `getgist --help` for details)")
        return False
    # Single match (or --yes): no need to prompt the user.
    if len(candidates) == 1 or self.assume_yes:
        return candidates.pop(0)
    return self._ask_which_gist(candidates)
|
def calculate_column_sum(matrix, column_index):
    """This function computes the sum of a given column in a given 2D list (matrix).

    For example,
    Given matrix = [[1, 2, 3, 2], [4, 5, 6, 2], [7, 8, 9, 5]],
    calculate_column_sum(matrix, 0) will give 12,
    calculate_column_sum(matrix, 1) will give 15,
    calculate_column_sum(matrix, 3) will give 9.

    :param matrix: A 2D list of integers
    :param column_index: Index of the column whose sum needs to be computed
    :return: Sum of all the elements in the specified column of the 2D list
    """
    total = 0
    for row in matrix:
        total += row[column_index]
    return total
|
def revise_sql(query, id_column, output_table, max_date_column, min_date_column, date_column, date, source_id_column=None):
    """Given an expensive query that aggregates temporal data,
    revise the results to censor before a particular date.

    :param query: SQL text containing the literal `1=1` as a hook where the
        censoring predicate is injected
    :param id_column: column name, or iterable of column names, identifying rows
    :param output_table: table holding the previously computed results
    :param max_date_column: column with each row's max observed date
    :param min_date_column: column with each row's min observed date
    :param date_column: date column of the source data to censor on
    :param date: cutoff date (string, spliced into the SQL as a literal)
    :param source_id_column: id column(s) in the source query; defaults to id_column
    :return: the revised SQL text
    """
    if source_id_column is None:
        source_id_column = id_column
    # Accept either a single column name or an iterable of names.
    # NOTE: exclude plain strings explicitly -- in Python 3 strings have
    # __iter__, and the original hasattr() test would join them char-by-char.
    if not isinstance(id_column, str) and hasattr(id_column, '__iter__'):
        id_column = ', '.join(id_column)
    if not isinstance(source_id_column, str) and hasattr(source_id_column, '__iter__'):
        source_id_column = ', '.join(source_id_column)
    sql_vars = dict(query=query, id_column=id_column, output_table=output_table,
                    max_date_column=max_date_column, min_date_column=min_date_column,
                    date_column=date_column, date=date, source_id_column=source_id_column)
    # Rows whose date range straddles the cutoff must be recomputed.
    sql_vars['ids_query'] = """
    SELECT {id_column} FROM {output_table}
    WHERE {max_date_column} >= '{date}' AND {min_date_column} < '{date}'""".format(**sql_vars)
    # Inject the censoring predicate at the `1=1` hook in the original query.
    sql_vars['revised_query'] = query.replace(
        '1=1',
        "(({source_id_column}) in (select * from ids_query) and {date_column} < '{date}')".format(**sql_vars))
    new_query = """
    with ids_query as ({ids_query})
    select * from ({revised_query}) t
    """.format(**sql_vars)
    return new_query
|
def snmp_server_group_group_auth_mode(self, **kwargs):
    """Build the NETCONF config element for snmp-server group auth-mode
    and dispatch it through the callback.

    Required kwargs: group_name, group_version, group_auth_mode.
    Optional kwarg: callback (defaults to self._callback).
    """
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server",
                                xmlns="urn:brocade.com:mgmt:brocade-snmp")
    group = ET.SubElement(snmp_server, "group")
    # Keyed child elements, in the order the schema expects them.
    for tag, kwarg in (("group-name", "group_name"),
                       ("group-version", "group_version"),
                       ("group-auth-mode", "group_auth_mode")):
        ET.SubElement(group, tag).text = kwargs.pop(kwarg)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def com_google_fonts_check_metadata_valid_name_values(style, font_metadata, font_familynames, typographic_familynames):
    """METADATA.pb font.name field contains font name in right format?"""
    from fontbakery.constants import RIBBI_STYLE_NAMES
    # RIBBI styles are validated against the plain family names; every other
    # style against the typographic family names.
    if style in RIBBI_STYLE_NAMES:
        expected_names = font_familynames
    else:
        expected_names = typographic_familynames
    failed = False
    for family_name in expected_names:
        if family_name not in font_metadata.name:
            failed = True
            yield FAIL, ("METADATA.pb font.name field (\"{}\")"
                         " does not match correct font name format (\"{}\")."
                         "").format(font_metadata.name, family_name)
    if not failed:
        yield PASS, ("METADATA.pb font.name field contains"
                     " font name in right format.")
|
def convertShape(shapeString):
    """Convert xml shape string into float tuples.

    This method converts the 2d or 3d shape string from SUMO's xml file
    into a list containing 3d float-tuples. Non existant z coordinates default
    to zero. If shapeString is empty, an empty list will be returned.
    """
    points = []
    for token in shapeString.split():
        coords = tuple(float(part) for part in token.split(","))
        if len(coords) == 2:
            # Pad missing z coordinate with zero.
            points.append((coords[0], coords[1], 0.))
        elif len(coords) == 3:
            points.append(coords)
        else:
            raise ValueError('Invalid shape point "%s", should be either 2d or 3d' % token)
    return points
|
def create_node(ctx, path):
    """Create node for given relative path.

    :param ctx: BuildContext object.
    :param path: Relative path relative to top directory.
    :return: Created Node.
    """
    # Ensure given context object is BuildContext object.
    _ensure_build_context(ctx)
    # Top directory's path relative to the `wscript` directory
    # (ctx.top_dir and ctx.run_dir are both absolute).
    top_dir_relpath = os.path.relpath(ctx.top_dir, ctx.run_dir)
    # Re-anchor `path` on the `wscript` directory and create the node there.
    return ctx.path.make_node(os.path.join(top_dir_relpath, path))
|
def raises(self, expected_exception):
    """Ensures preceding predicates (specifically, :meth:`called_with()`)
    result in *expected_exception* being raised.
    """
    # Delegate to unittest's assertRaises with the recorded call spec.
    subject = self._orig_subject
    return unittest_case.assertRaises(expected_exception, subject,
                                      *self._args, **self._kwargs)
|
def validate(self, sig=None):
    '''Check if file matches its signature.

    :param sig: optional (mtime, size, md5) triple; when omitted the
        signature is read from self.sig_file()
    :return: True if the file (or its .zapped placeholder) matches, else False
    '''
    if sig is not None:
        sig_mtime, sig_size, sig_md5 = sig
    else:
        try:
            with open(self.sig_file()) as sig:
                sig_mtime, sig_size, sig_md5 = sig.read().strip().split()
        except Exception:
            # Signature file missing or malformed: cannot validate.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            return False
    if not self.exists():
        # A `.zapped` placeholder records the original MD5 in its last
        # tab-separated field; accept it as a match.
        if (self + '.zapped').is_file():
            with open(self + '.zapped') as sig:
                line = sig.readline()
            return sig_md5 == line.strip().rsplit('\t', 3)[-1]
        else:
            return False
    # NOTE(review): when the signature comes from the sig file these values
    # are strings, so this fast-path comparison with os.path's numeric
    # results may never match and we fall through to the MD5 check -- confirm.
    if sig_mtime == os.path.getmtime(self) and sig_size == os.path.getsize(self):
        return True
    return fileMD5(self) == sig_md5
|
def _convert_rename(self, fc):
    """Convert a FileRenameCommand into a new FileCommand.

    :return: None if the rename is being ignored, otherwise a
        new FileCommand based on the whether the old and new paths
        are inside or outside of the interesting locations.
    """
    old_path = fc.old_path
    new_path = fc.new_path
    keep_old = self._path_to_be_kept(old_path)
    keep_new = self._path_to_be_kept(new_path)
    if keep_old and keep_new:
        # Rename stays entirely inside the interesting locations:
        # rewrite both paths relative to the new root.
        fc.old_path = self._adjust_for_new_root(old_path)
        fc.new_path = self._adjust_for_new_root(new_path)
        return fc
    if keep_old:
        # The file has been renamed to a non-interesting location.
        # Delete it!
        return commands.FileDeleteCommand(self._adjust_for_new_root(old_path))
    if keep_new:
        # The file has been renamed into an interesting location.
        # We really ought to add it but we don't currently buffer
        # the contents of all previous files and probably never want
        # to. Maybe fast-import-info needs to be extended to
        # remember all renames and a config file can be passed
        # into here ala fast-import?
        self.warning("cannot turn rename of %s into an add of %s yet" % (old_path, new_path))
    return None
|
def create_mssql_pymssql(username, password, host, port, database, **kwargs):  # pragma: no cover
    """Create an engine connected to a mssql database using pymssql.

    Extra keyword arguments are forwarded to ``create_engine``.
    """
    url = _create_mssql_pymssql(username, password, host, port, database)
    return create_engine(url, **kwargs)
|
def set_next_week_day(val, week_day, iso=False):
    """Set week day.

    New date will be greater or equal than input date.

    :param val: datetime or date
    :type val: datetime.datetime | datetime.date
    :param week_day: Week day to set
    :type week_day: int
    :param iso: week_day in ISO format, or not
    :type iso: bool
    :return: datetime.datetime | datetime.date
    """
    # Current week day, in the same convention the caller used for week_day.
    current_day = val.isoweekday() if iso else val.weekday()
    # sign=1 moves forward in time.
    return _set_week_day(val, week_day, current_day, sign=1)
|
def execute_streaming_sql(self, session, sql, transaction=None, params=None, param_types=None, resume_token=None, query_mode=None, partition_token=None, seqno=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):
    """Like ``ExecuteSql``, except returns the result set as a stream.

    Unlike ``ExecuteSql``, there is no limit on the size of the returned
    result set. However, no individual row in the result set can exceed
    100 MiB, and no column value can exceed 10 MiB.

    Example:
        >>> from google.cloud import spanner_v1
        >>> client = spanner_v1.SpannerClient()
        >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
        >>> # TODO: Initialize `sql`:
        >>> sql = ''
        >>> for element in client.execute_streaming_sql(session, sql):
        ...     # process element
        ...     pass

    Args:
        session (str): Required. The session in which the SQL query should be
            performed.
        sql (str): Required. The SQL string.
        transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]):
            The transaction to use. For queries, if none is provided, the
            default is a temporary read-only transaction with strong
            concurrency. Standard DML statements require a ReadWrite
            transaction; Partitioned DML requires an existing PartitionedDml
            transaction ID. A dict must have the form of the
            ``TransactionSelector`` protobuf message.
        params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Values
            for the ``@name`` parameter placeholders appearing in ``sql``.
            It is an error to execute a statement with unbound parameters.
            A dict must have the form of the ``Struct`` protobuf message.
        param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]):
            Explicit SQL types for parameters whose type cannot be inferred
            from the JSON value (e.g. ``BYTES`` vs ``STRING``).
        resume_token (bytes): Token copied from the last ``PartialResultSet``
            yielded before an interruption; resumes the execution where the
            previous one left off. All other request parameters must match
            the original request exactly.
        query_mode (~google.cloud.spanner_v1.types.QueryMode): Controls the
            amount of debugging information returned in ``ResultSetStats``.
            With ``partition_token`` set, only ``QueryMode.NORMAL`` is valid.
        partition_token (bytes): If present, restricts results to the
            partition previously created via ``PartitionQuery()``.
        seqno (long): Per-transaction, monotonically increasing sequence
            number making the request idempotent. Required for DML
            statements; ignored for queries.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        Iterable[~google.cloud.spanner_v1.types.PartialResultSet].

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic, lazily on
    # first use, and cache the wrapped callable for subsequent calls.
    if "execute_streaming_sql" not in self._inner_api_calls:
        self._inner_api_calls["execute_streaming_sql"] = google.api_core.gapic_v1.method.wrap_method(self.transport.execute_streaming_sql, default_retry=self._method_configs["ExecuteStreamingSql"].retry, default_timeout=self._method_configs["ExecuteStreamingSql"].timeout, client_info=self._client_info, )
    request = spanner_pb2.ExecuteSqlRequest(session=session, sql=sql, transaction=transaction, params=params, param_types=param_types, resume_token=resume_token, query_mode=query_mode, partition_token=partition_token, seqno=seqno, )
    # Copy caller metadata and append routing metadata so the backend can
    # route the request by session.
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("session", session)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
        metadata.append(routing_metadata)
    return self._inner_api_calls["execute_streaming_sql"](request, retry=retry, timeout=timeout, metadata=metadata)
|
def _thread_init ( cls ) :
"""Ensure thread local is initialized ."""
|
if not hasattr ( cls . _local , '_in_order_futures' ) :
cls . _local . _in_order_futures = set ( )
cls . _local . _activated = False
|
def segmentlistdict_from_short_string(s, boundtype=int):
    """Parse a string representation of a set of named segmentlists into a
    segmentlistdict object. The string encoding is that generated by
    segmentlistdict_to_short_string(). The optional boundtype argument
    will be passed to from_range_strings() when parsing the segmentlist
    objects from the string.

    Example:

    >>> segmentlistdict_from_short_string("H1=0:10,35,100:/L1=5:15,45:60")
    {'H1': [segment(0, 10), segment(35, 35), segment(100, infinity)], 'L1': [segment(5, 15), segment(45, 60)]}

    This function, and its inverse segmentlistdict_to_short_string(),
    are intended to be used to allow small segmentlistdict objects to
    be encoded in command line options and config files. For large
    segmentlistdict objects or when multiple sets of segmentlists are
    required, the LIGO Light Weight XML encoding available through the
    pycbc_glue.ligolw library should be used.
    """
    result = segments.segmentlistdict()
    # Entries are separated by "/", each of the form "name=range,range,..."
    for token in s.strip().split("/"):
        name, ranges = token.strip().split("=")
        result[name.strip()] = from_range_strings(ranges.strip().split(","), boundtype=boundtype)
    return result
|
def fit(self, X):
    """Apply KMeans Clustering

    X: dataset with feature vectors
    """
    # Run clustering with the configured hyper-parameters and store the
    # fitted results: cluster centers, per-sample labels, per-trial SSE
    # values, and the number of iterations performed.
    self.centers_, self.labels_, self.sse_arr_, self.n_iter_ = _kmeans(X, self.n_clusters, self.max_iter, self.n_trials, self.tol)
|
def get_scope_by_name(self, scope_name):
    """GetScopeByName.

    [Preview API]
    :param str scope_name:
    :rtype: :class:`<IdentityScope> <azure.devops.v5_0.identity.models.IdentityScope>`
    """
    params = {}
    if scope_name is not None:
        params['scopeName'] = self._serialize.query('scope_name', scope_name, 'str')
    # Issue the GET against the identity-scope resource endpoint.
    response = self._send(http_method='GET',
                          location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
                          version='5.0-preview.2',
                          query_parameters=params)
    return self._deserialize('IdentityScope', response)
|
def getChain(self):
    "returns a list of keys representing the chain of documents"
    keys = []
    # Walk the linked list from head following nextDoc pointers.
    node = self.head
    while node:
        keys.append(node._key)
        node = node.nextDoc
    return keys
|
def asDictionary(self):
    """returns the object as a dictionary"""
    # NOTE(review): the "imageData" value is read from self._imageDate --
    # attribute name looks like a typo for _imageData; confirm against the
    # class initializer before renaming.
    return {
        "type": "esriPMS",
        "url": self._url,
        "imageData": self._imageDate,
        "contentType": self._contentType,
        "width": self._width,
        "height": self._height,
        "angle": self._angle,
        "xoffset": self._xoffset,
        "yoffset": self._yoffset,
        "xscale": self._xscale,
        "yscale": self._yscale,
        "outline": self._outline,
    }
|
def annotate_proto(self, text, annotators=None):
    """Return a Document protocol buffer from the CoreNLP server, containing
    annotations of the text.

    :param (str) text: text to be annotated
    :param (list[str]) annotators: a list of annotator names
    :return (CoreNLP_pb2.Document): a Document protocol buffer
    """
    properties = {
        'annotators': ','.join(annotators or self.default_annotators),
        'outputFormat': 'serialized',
        'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer',
    }
    response = self._request(text, properties)
    raw = response.content  # bytes
    # The server prefixes the serialized Document with a varint length.
    size, pos = _DecodeVarint(raw, 0)
    doc = CoreNLP_pb2.Document()
    doc.ParseFromString(raw[pos:(pos + size)])
    return doc
|
def new_connection(self, remote_ip, remote_port):
    """This method is called when a new SMTP session is opened.
    [PUBLIC API]
    """
    self.state.set_state('new')
    self._message = Message(Peer(remote_ip, remote_port))
    decision, response_sent = self.is_allowed('accept_new_connection', self._message.peer)
    if not decision:
        # Connection refused by policy: reply (unless the policy already
        # replied) and drop the connection.
        if not response_sent:
            self.reply(554, 'SMTP service not available')
        self.close_connection()
        return
    # Connection accepted: greet (unless already done) and apply limits.
    if not response_sent:
        self.handle_input('greet')
    self._set_size_restrictions()
|
def wait_until_exit(self):
    """Wait until thread exit.

    Used for testing purpose only.
    """
    # Without a timeout the worker loops forever and join() would block.
    if self._timeout is None:
        raise Exception("Thread will never exit. Use stop or specify timeout when starting it!")
    self._thread.join()
    self.stop()
|
def mark_dmag_rec(s, ind, data):
    """Edits demagnetization data to mark "bad" points with measurement_flag

    :param s:    specimen name (er_specimen_name) to operate on
    :param ind:  index of the target record within the specimen's
                 demagnetization datablock
    :param data: list of measurement record dicts; modified in place
    :return:     the (mutated) data list
    """
    # Build the demag datablock for this specimen: records whose method
    # codes indicate NRM, AF or thermal demag steps.
    datablock = []
    for rec in data:
        if rec['er_specimen_name'] == s:
            meths = rec['magic_method_codes'].split(':')
            if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
                datablock.append(rec)
    # The record to toggle, addressed by its position in the datablock.
    dmagrec = datablock[ind]
    # Find the same record back in the full data list by matching treatment
    # and measurement values, then flip its good/bad flag.
    for k in range(len(data)):
        meths = data[k]['magic_method_codes'].split(':')
        if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
            if data[k]['er_specimen_name'] == s:
                if data[k]['treatment_temp'] == dmagrec['treatment_temp'] and data[k]['treatment_ac_field'] == dmagrec['treatment_ac_field']:
                    if data[k]['measurement_dec'] == dmagrec['measurement_dec'] and data[k]['measurement_inc'] == dmagrec['measurement_inc'] and data[k]['measurement_magn_moment'] == dmagrec['measurement_magn_moment']:
                        # Toggle flag: 'g' (good) <-> 'b' (bad).
                        if data[k]['measurement_flag'] == 'g':
                            flag = 'b'
                        else:
                            flag = 'g'
                        data[k]['measurement_flag'] = flag
                        # Only the first matching record is toggled.
                        break
    return data
|
def get_command(self, ctx, cmd_name):
    """gets the subcommands under the service name

    Parameters
    ----------
    ctx : Context
        the context object passed into the method
    cmd_name : str
        the service name

    Returns
    -------
    EventTypeSubCommand:
        returns subcommand if successful, None if not.
    """
    known_commands = self.all_cmds
    if cmd_name in known_commands:
        return EventTypeSubCommand(self.events_lib, cmd_name, known_commands[cmd_name])
    # Unknown service name.
    return None
|
def on_lstSubcategories_itemSelectionChanged(self):
    """Update subcategory description label.

    .. note:: This is an automatic Qt slot
        executed when the subcategory selection changes.
    """
    self.clear_further_steps()
    selected = self.selected_subcategory()
    # Nothing selected: leave the widgets untouched.
    if not selected:
        return
    # Refresh description text and icon for the selection.
    self.lblDescribeSubcategory.setText(selected['description'])
    self.lblIconSubcategory.setPixmap(QPixmap(get_image_path(selected)))
    # Enable the next button.
    self.parent.pbnNext.setEnabled(True)
|
def indexes(self, recurse=True):
    """Returns the list of indexes that are associated with this schema.

    :param recurse: when True, also include indexes inherited from the
        parent schema
    :return: {<str> name: <orb.Index>, ..}
    """
    result = self.__indexes.copy()
    if recurse and self.inherits():
        # Merge in the inherited schema's indexes.
        parent_schema = orb.system.schema(self.inherits())
        if not parent_schema:
            raise orb.errors.ModelNotFound(schema=self.inherits())
        result.update(parent_schema.indexes(recurse=recurse))
    return result
|
def fem(ab, off, angle, zsrc, zrec, lsrc, lrec, depth, freq, etaH, etaV, zetaH, zetaV, xdirect, isfullspace, ht, htarg, use_ne_eval, msrc, mrec, loop_freq, loop_off, conv=True):
    r"""Return the electromagnetic frequency-domain response.

    This function is called from one of the above modelling routines. No
    input-check is carried out here. See the main description of :mod:`model`
    for information regarding input and output parameters.

    This function can be directly used if you are sure the provided input is in
    the correct format. This is useful for inversion routines and similar, as
    it can speed-up the calculation by omitting input-checks.
    """
    # Preallocate output array: one row per frequency, one column per offset.
    fEM = np.zeros((freq.size, off.size), dtype=complex)
    # Initialize kernel count
    # (how many times the wavenumber-domain kernel was called).
    kcount = 0
    # If <ab> = 36 (or 63), fEM-field is zero.
    if ab in [36, ]:
        return fEM, kcount, conv
    # Get full-space-solution if xdirect=True and model is a full-space or
    # if src and rec are in the same layer.
    if xdirect and (isfullspace or lsrc == lrec):
        fEM += kernel.fullspace(off, angle, zsrc, zrec, etaH[:, lrec], etaV[:, lrec], zetaH[:, lrec], zetaV[:, lrec], ab, msrc, mrec)
    # If `xdirect = None` we set it here to True, so it is NOT calculated in
    # the wavenumber domain. (Only reflected fields are returned.)
    if xdirect is None:
        xdir = True
    else:
        xdir = xdirect
    # Get angle dependent factors.
    factAng = kernel.angle_factor(angle, ab, msrc, mrec)
    # Compute required lambdas for given hankel-filter-base.
    # This should be in utils, but this is a backwards-incompatible change.
    # Move this to utils for version 2.0.
    if ht == 'fht':
        # htarg[0] = filter; htarg[1] = pts_per_dec
        lambd, int_pts = transform.get_spline_values(htarg[0], off, htarg[1])
        if not loop_off:
            htarg = (htarg[0], htarg[1], lambd, int_pts)
    # If not full-space with xdirect calculate fEM-field.
    # (The multiplication `isfullspace * xdir` acts as a logical AND here.)
    if not isfullspace * xdir:
        # Resolve the Hankel-transform routine by name ('fht', 'qwe', ...).
        calc = getattr(transform, ht)
        if loop_freq:
            # Loop over frequencies, one wavenumber transform per frequency.
            for i in range(freq.size):
                out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH[None, i, :], etaV[None, i, :], zetaH[None, i, :], zetaV[None, i, :], xdir, htarg, use_ne_eval, msrc, mrec)
                fEM[None, i, :] += out[0]
                kcount += out[1]
                conv *= out[2]
        elif loop_off:
            # Loop over offsets instead.
            for i in range(off.size):
                # See comments above where it says "ht == 'fht'".
                # Get pre-calculated lambd, int_pts for this offset.
                if ht == 'fht':
                    htarg = (htarg[0], htarg[1], lambd[None, i, :], int_pts[i])
                out = calc(zsrc, zrec, lsrc, lrec, off[None, i], factAng[None, i], depth, ab, etaH, etaV, zetaH, zetaV, xdir, htarg, use_ne_eval, msrc, mrec)
                fEM[:, None, i] += out[0]
                kcount += out[1]
                conv *= out[2]
        else:
            # Fully vectorized: all frequencies and offsets at once.
            out = calc(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH, zetaV, xdir, htarg, use_ne_eval, msrc, mrec)
            fEM += out[0]
            kcount += out[1]
            conv *= out[2]
    return fEM, kcount, conv
|
def subtract(self, other=None, **kwargs):
    """Elements are subtracted from an *iterable* or from another
    *mapping* (or counter). Like :func:`dict.update` but subtracts
    counts instead of replacing them.
    """
    sub = operator.sub
    if other is not None:
        # Same Redis instance: let the server do the work.
        if self._same_redis(other, RedisCollection):
            self._update_helper(other, sub, use_redis=True)
        elif hasattr(other, 'keys'):
            # Mapping-like: subtract its counts directly.
            self._update_helper(other, sub)
        else:
            # Plain iterable: count occurrences first.
            self._update_helper(collections.Counter(other), sub)
    if kwargs:
        self._update_helper(kwargs, sub)
|
def update_dataset(self, dataStr, flatten=False):
    '''Update the current object with the variables found in a data structure.

    :param dataStr: mapping of variable name to data; the special keys
        ``_dimensions`` and ``_attributes`` describe the structure itself
        and are popped (i.e. *dataStr* is mutated).
    :keyword flatten: use this to automatically flatten variables (squeeze dimensions)

    NOTE(review): Python 2 only code (``print`` statement, ``dict.has_key``,
    direct indexing of ``dict.keys()``).
    '''
    # Load keys and dimensions
    dataDim = dataStr.pop('_dimensions', {})
    attrStr = dataStr.pop('_attributes', {})
    ndims = dataDim.pop('_ndims', 0)
    # dimensions[0] = names, dimensions[1] = lengths (parallel lists)
    dimensions = [dataDim.keys(), dataDim.values()]
    keys = dataStr.keys()
    if len(keys) == 0:
        self.warning(2, 'No data loaded')
        return
    self.message(2, 'Loaded variables : ' + str(keys))
    # Check what is the current variable type: "structured" means each entry
    # is a dict carrying 'data' / '_dimensions' / '_attributes'.
    isStructure = True if isinstance(dataStr[keys[0]], dict) else False
    # datalen = [np.size(dataStr[key]) for key in keys]
    # Shape is inverted wrt to order of dimensions to be consistent with check_variable
    datalen = [list(np.shape(dataStr[key]['data'])[::-1]) for key in keys] if isStructure else [list(np.shape(dataStr[key])[::-1]) for key in keys]
    if isStructure:
        # Per-variable dimension names (skip leading '_ndims' entry)
        varDim = [list(dataStr[key]['_dimensions'])[1:] for key in keys]
        # Dimensions indices from actual variables' dimensions
        ind = [where_list(vDim, dimensions[0]) for vDim in varDim]
        # Check dimension lengths
        # dimOk = np.array([enum[1][0] == dimensions[1][ind[enum[0]][0]] for enum in enumerate(datalen)])
        dimOk = [any([enum[1][ii] == dimensions[1][jj] for ii, jj in enumerate(ind[enum[0]])]) for enum in enumerate(datalen)]
        if any([not d for d in dimOk]):
            # At least one variable's shape disagrees with the declared dimensions
            notOk = np.where(~np.array(dimOk))[0]
            print datalen
            self.Error('Problem with {0} variables : {1}'.format(len(notOk), ','.join(np.array(dataStr.keys())[notOk])))
    else:
        # Dimensions indices from variable length
        ind = [where_list(dlen, dimensions[1]) for dlen in datalen]
        # NOTE(review): compares a boolean to 0 -- presumably intended to test
        # whether ANY index is -1 (undefined variable); confirm.
        if (np.array(ind).sum() == -1) != 0:
            self.Error('At least one variable have not been properly defined')
    dimname = [np.array(dimensions[0])[i].tolist() for i in ind]
    # Get correspondance between data structure dimensions and variables
    curDim, nself = self.get_currentDim()
    # createDim[i][j] is True when dimension j of variable i does not yet
    # exist on the current object (where_list returned -1).
    createDim = np.array([np.array([w == -1 for w in where_list(j, curDim[0])]) for i, j in enumerate(dimname)])
    createDim = np.squeeze(createDim)
    # curInd = atools.where_list(dimname_reduced, curDim[0]) # Get correspondance between data structure dimensions and object dimensions
    # createDim = (np.array(curInd) == -1) # Get dimensions to be created
    # Variables not yet attached to self must be created
    toCreate = np.array([not self.__dict__.has_key(key) for key in keys])
    updateDim = []
    self.message(2, 'Updating object with ' + str(['{0}({1}:{2})'.format(i[0], i[1], i[2]) for i in zip(*(keys, dimname, datalen))]))
    # Update variables available in files
    for enum in enumerate(keys):
        ind = enum[0]   # NOTE: shadows the earlier `ind` index list
        key = enum[1]
        # Load variable
        # var = dataStr.get(key)
        dum = dataStr.get(key).pop('data') if isStructure else copy.deepcopy(dataStr.get(key))
        if flatten:
            # Squeeze the data before (re)wrapping it below
            if isinstance(dum, dict):
                dum['data'] = dum['data'].flatten()
            else:
                dum = dum.flatten()
        if not isStructure:
            # Wrap the bare array into the structured form expected downstream
            dum = {'_dimensions': dum._dimensions if hasattr(dum, '_dimensions') else {}, '_attributes': dum._attributes if hasattr(dum, '_attributes') else {}, 'data': dum}
        else:
            dumStr = dataStr.get(key)
            dumStr.update({'data': dum})
            dum = dumStr
        dumDim = dimStr(dimname[ind], datalen[ind])
        # if dataStr[key].has_key('_attributes'):
        #     dum.update(dataStr[key]['_attributes'])
        # if isinstance(dum, np.ma.masked_array):
        #     # Get associated dimensions
        #     datalen = datalen[ind]  # [len(dataStr[key]) for key in keys]
        #     ind = atools.where_list([datalen], dimensions[1])[0]
        #     if (ind == -1): self.Error('Dimensions of current variable (' + key + ') have not been properly defined')
        #     dimname = dimensions[
        # Initialize variable if required
        # if toCreate:
        # updateDim.append(self.create_Variable(key, dum, dimensions={dimname[ind]: datalen[ind]}, toCreate=toCreate[ind], createDim=createDim[ind]))
        updateDim.append(self.create_Variable(key, dum, dimensions=dumDim, toCreate=toCreate[ind], createDim=createDim[ind]))
    # Extend missing variables
    # missing__keys = list(set(self.par_list).difference(keys))
    # for enum in enumerate(missing__keys):
    #     ind = enum[0]
    #     key = enum[1]
    #     updateDim.append(self.create_Variable(key, np.ma.repeat(self.dist_to_coast_leuliette.fill_value), dimensions=dumDim, toCreate=False, createDim=False))
    # Final sequence: deduplicate (name, length) pairs for existing dims...
    zipped_upd = zip(*(np.hstack(dimname)[~np.hstack(createDim)], np.hstack(datalen)[~np.hstack(createDim)]))
    # 2D unique -- NOTE(review): '|S16' truncates values longer than 16 chars
    updateDim_List = np.array(list(set(tuple(i) for i in np.array(zipped_upd, dtype='|S16').tolist())))
    # updateDim_List = np.unique(np.array(zipped_upd, dtype='|S16'))  # [str(i) for i in datalen]
    # if updateDim_List.size > 0: updateDim_List.resize((2, updateDim_List.size / 2))
    # updateDim_List = np.unique(zip(*(np.array(dimname)[~createDim], np.array(datalen)[~createDim])))  # [str(i) for i in datalen]
    # ...and for dimensions that still have to be created.
    zipped_dims = zip(*(np.hstack(dimname)[np.hstack(createDim)], np.hstack(datalen)[np.hstack(createDim)]))
    createDim_list = np.array(list(set(tuple(i) for i in np.array(zipped_dims, dtype='|S16').tolist())))
    # 2D unique
    # clist, inv = np.unique(np.array(zipped_dims, dtype='|S16'), return_inverse=True)  # RQ: THIS WILL FAIL IF NUMBERS HAVE MORE THAN 16 DIGITS  # [str(i) for i in datalen]
    # if createDim_list.size > 0: createDim_list.resize((2, createDim_list.size / 2))
    # createDim_list = np.unique(zip(*(np.array(dimname)[createDim], np.array(datalen)[createDim])))  # [str(i) for i in datalen]
    for dname, dim in createDim_list:
        self.create_Dim(dname, np.int(dim))
    for dname, dim in updateDim_List:
        self.update_Dim(dname, np.int(dim))
|
def parse_bowtie2_logs(self, f):
    """Warning: This function may make you want to stab yourself.

    Parse logs from bowtie2. These miss several key bits of information
    such as input files, so we try to look for logs from other wrapper tools
    that may have logged this info. If not found, we default to using the filename.
    Note that concatenated logs only parse if we have the command printed in there.
    The bowtie log uses the same strings multiple times in different contexts to mean
    different things, making parsing very messy. Handle with care.

    Example single-end output from bowtie2:
        Time loading reference: 00:00:08
        Time loading forward index: 00:00:16
        Time loading mirror index: 00:00:09
        [samopen] SAM header is present: 25 sequences.
        Multiseed full-index search: 00:58:04
        38377305 reads; of these:
          38377305 (100.00%) were unpaired; of these:
            2525577 (6.58%) aligned 0 times
            27593593 (71.90%) aligned exactly 1 time
            8258135 (21.52%) aligned >1 times
        93.42% overall alignment rate
        Time searching: 00:58:37
        Overall time: 00:58:37

    Example paired-end output from bowtie2:
        Time loading reference: 00:01:07
        Time loading forward index: 00:00:26
        Time loading mirror index: 00:00:09
        Multiseed full-index search: 01:32:55
        15066949 reads; of these:
          15066949 (100.00%) were paired; of these:
            516325 (3.43%) aligned concordantly 0 times
            11294617 (74.96%) aligned concordantly exactly 1 time
            3256007 (21.61%) aligned concordantly >1 times
            516325 pairs aligned concordantly 0 times; of these:
              26692 (5.17%) aligned discordantly 1 time
            489633 pairs aligned 0 times concordantly or discordantly; of these:
              979266 mates make up the pairs; of these:
                592900 (60.55%) aligned 0 times
                209206 (21.36%) aligned exactly 1 time
                177160 (18.09%) aligned >1 times
        98.03% overall alignment rate
        Time searching: 01:34:37
        Overall time: 01:34:37
    """
    # Regexes: bowtie2 reuses the same alignment phrases in the single-end
    # and paired-end sections, so the patterns are grouped by context and
    # only applied inside the corresponding indented sub-block below.
    regexes = {
        'unpaired': {
            'unpaired_aligned_none': r"(\d+) \([\d\.]+%\) aligned 0 times",
            'unpaired_aligned_one': r"(\d+) \([\d\.]+%\) aligned exactly 1 time",
            'unpaired_aligned_multi': r"(\d+) \([\d\.]+%\) aligned >1 times"
        },
        'paired': {
            'paired_aligned_none': r"(\d+) \([\d\.]+%\) aligned concordantly 0 times",
            'paired_aligned_one': r"(\d+) \([\d\.]+%\) aligned concordantly exactly 1 time",
            'paired_aligned_multi': r"(\d+) \([\d\.]+%\) aligned concordantly >1 times",
            'paired_aligned_discord_one': r"(\d+) \([\d\.]+%\) aligned discordantly 1 time",
            'paired_aligned_discord_multi': r"(\d+) \([\d\.]+%\) aligned discordantly >1 times",
            'paired_aligned_mate_one': r"(\d+) \([\d\.]+%\) aligned exactly 1 time",
            'paired_aligned_mate_multi': r"(\d+) \([\d\.]+%\) aligned >1 times",
            'paired_aligned_mate_none': r"(\d+) \([\d\.]+%\) aligned 0 times"
        }
    }
    # Go through log file line by line
    s_name = f['s_name']
    parsed_data = {}
    # NOTE(review): the loop mixes `for l in f['f']` with readline() calls on
    # the same handle -- presumably f['f'] supports this interleaving; confirm.
    for l in f['f']:
        # Attempt in vain to find original bowtie2 command, logged by another program
        btcmd = re.search(r"bowtie2 .+ -[1U] ([^\s,]+)", l)
        if btcmd:
            s_name = self.clean_s_name(btcmd.group(1), f['root'])
            log.debug("Found a bowtie2 command, updating sample name to '{}'".format(s_name))
        # Total reads
        total = re.search(r"(\d+) reads; of these:", l)
        if total:
            parsed_data['total_reads'] = int(total.group(1))
        # Single end reads
        unpaired = re.search(r"(\d+) \([\d\.]+%\) were unpaired; of these:", l)
        if unpaired:
            parsed_data['unpaired_total'] = int(unpaired.group(1))
            self.num_se += 1
            # Do nested loop whilst we have this level of indentation
            l = f['f'].readline()
            while l.startswith(' '):
                for k, r in regexes['unpaired'].items():
                    match = re.search(r, l)
                    if match:
                        parsed_data[k] = int(match.group(1))
                l = f['f'].readline()
        # Paired end reads
        paired = re.search(r"(\d+) \([\d\.]+%\) were paired; of these:", l)
        if paired:
            parsed_data['paired_total'] = int(paired.group(1))
            self.num_pe += 1
            # Do nested loop whilst we have this level of indentation
            l = f['f'].readline()
            while l.startswith(' '):
                for k, r in regexes['paired'].items():
                    match = re.search(r, l)
                    if match:
                        parsed_data[k] = int(match.group(1))
                l = f['f'].readline()
        # Overall alignment rate -- this line marks the end of a log section
        overall = re.search(r"([\d\.]+)% overall alignment rate", l)
        if overall:
            parsed_data['overall_alignment_rate'] = float(overall.group(1))
            # End of log section
            # Save half 'pairs' of mate counts
            m_keys = ['paired_aligned_mate_multi', 'paired_aligned_mate_none', 'paired_aligned_mate_one']
            for k in m_keys:
                if k in parsed_data:
                    parsed_data['{}_halved'.format(k)] = float(parsed_data[k]) / 2.0
            # Save parsed data
            if s_name in self.bowtie2_data:
                log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
            self.add_data_source(f, s_name)
            self.bowtie2_data[s_name] = parsed_data
            # Reset in case we find more in this log file (concatenated logs)
            s_name = f['s_name']
            parsed_data = {}
|
def _report_self(self):
    """Report this crawler's uuid to redis as a timestamped heartbeat key."""
    if not self.redis_connected:
        self.logger.warn("Cannot report self to redis, not connected")
        return
    self.logger.debug("Reporting self to redis")
    try:
        key = "stats:rest:self:{m}:{u}".format(m=socket.gethostname(), u=self.my_uuid)
        self.redis_conn.set(key, self.get_time())
        # Expire the key so dead instances disappear automatically.
        self.redis_conn.expire(key, self.settings['HEARTBEAT_TIMEOUT'])
    except ConnectionError:
        self.logger.error("Lost connection to Redis")
        self._spawn_redis_connection_thread()
|
def accumulate_from_superclasses(cls, propname):
    '''Traverse the class hierarchy and accumulate the special sets of names
    ``MetaHasProps`` stores on classes.

    Args:
        propname (str): name of the special attribute to collect.
            Typically meaningful values are: ``__container_props__``,
            ``__properties__``, ``__properties_with_refs__``

    Returns:
        set: union of the named attribute over the whole MRO.
    '''
    cachename = "__cached_all" + propname
    # Check cls.__dict__ directly, NOT hasattr(): hasattr() would also find a
    # cache stored on a base class, and each class needs its own cache.
    if cachename not in cls.__dict__:
        collected = set()
        for klass in inspect.getmro(cls):
            if issubclass(klass, HasProps) and hasattr(klass, propname):
                collected.update(getattr(klass, propname))
        setattr(cls, cachename, collected)
    return cls.__dict__[cachename]
|
def replaceelement(oldelem, newelem):
    '''Replace *oldelem* with *newelem* in oldelem's parent, keeping position.

    No-op when *oldelem* has no parent (e.g. it is the tree root) or is not
    found among the parent's children.
    '''
    parent = oldelem.getparent()
    if parent is None:
        return
    # Scan the children once instead of rebuilding getchildren() on every
    # iteration, and stop after the first match so a duplicated reference is
    # not replaced twice with the same node (the original kept looping).
    for index, child in enumerate(parent):
        if child is oldelem:
            parent.remove(oldelem)
            parent.insert(index, newelem)
            break
|
def token_name(tokens, expected):
    """Match a token name (type).

    Returns a ``TokenMatch`` when the first token's ``name`` equals
    *expected*; returns ``None`` when the stream is exhausted, the token is
    falsy, or the name does not match.
    """
    iterator = iter(tokens)
    try:
        token = next(iterator)
    except StopIteration:
        return None
    if token and token.name == expected:
        return TokenMatch(None, token.value, (token,))
    return None
|
def run(self, allow_interactive=True):
    """Enter the native GUI event loop.

    Parameters
    ----------
    allow_interactive : bool
        Whether the application may defer to an interactive console
        (``python -i main.py``). In that case interactive mode is enabled
        and this call returns immediately, relying on the interpreter's
        input loop to drive the VisPy event loop after script execution.
    """
    if allow_interactive and self.is_interactive():
        inputhook.set_interactive(enabled=True, app=self)
        return None
    return self._backend._vispy_run()
|
def logout_callback():
    """Route called by the OpenID provider when the user logs out.

    Expires the auth cookies so the browser drops the session.
    """
    response = make_response('Logging Out')
    for cookie_name in ('sub', 'session_id'):
        response.set_cookie(cookie_name, 'null', expires=0)
    return response
|
def fix_docs(cls):
    """Copy docstrings of derived attributes (methods, properties, attrs)
    from parent classes onto *cls* where the child left them undocumented.
    """
    import inspect
    for name, member in inspect.getmembers(cls):
        if name.startswith('_') or member.__doc__ is not None:
            continue
        # Walk the MRO (excluding cls itself) for the first documented parent.
        for parent in cls.__mro__[1:]:
            parent_member = getattr(parent, name, None)
            if not (parent_member and getattr(parent_member, '__doc__', None)):
                continue
            if isinstance(member, property):
                # A property's doc attribute is read-only: rebuild it.
                setattr(cls, name, property(fget=member.fget, fset=member.fset,
                                            fdel=member.fdel,
                                            doc=parent_member.__doc__))
            elif hasattr(member, '__func__'):
                # Instancemethods expose the writable doc on __func__.
                member.__func__.__doc__ = parent_member.__doc__
            else:
                member.__doc__ = parent_member.__doc__
            break
    return cls
|
def withNamedBits(cls, **values):
    """Create a subclass of *cls* with a discreet named-bits constraint.

    Fully duplicate enumerations are collapsed along the way (set union of
    the existing named values and the new ones).
    """
    merged = set(cls.namedValues.items())
    merged.update(values.items())
    class X(cls):
        namedValues = namedval.NamedValues(*merged)
    # Keep the original class name so the subclass is transparent to users.
    X.__name__ = cls.__name__
    return X
|
def set_level(self, position, channel=None):
    """Seek a specific value by specifying a float() from 0.0 to 1.0.

    :param position: target level; anything ``float()`` accepts.
    :param channel: optional channel identifier, passed through to
        ``writeNodeData``.
    :return: ``False`` when *position* cannot be converted to float,
        otherwise ``None``.
    """
    try:
        position = float(position)
    except (TypeError, ValueError) as err:
        # float() only raises these two; the original broad `except
        # Exception` would also have hidden real programming errors.
        LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
        return False
    self.writeNodeData("LEVEL", position, channel)
|
def expose_finish(self, *args):
    """Finish the drawing process: present the frame and close the GL context."""
    # Obtain a reference to the OpenGL drawable.
    drawable = self.get_gl_drawable()
    # glcontext = self.get_gl_context()
    if not drawable:
        # Nothing to present without a drawable.
        return
    # Put the buffer on the screen: swap for double-buffered surfaces,
    # otherwise force the GL pipeline to flush.
    present = drawable.swap_buffers if drawable.is_double_buffered() else glFlush
    present()
    # OpenGL end
    drawable.gl_end()
|
def LDRSH(self, params):
    """LDRSH Ra, [Rb, Rc]

    Load a half word from memory, sign extend, and put into Ra.
    Ra, Rb, and Rc must be low registers.

    :param params: raw operand string, parsed by get_three_parameters.
    :return: a closure that performs the load when the instruction executes.
    """
    # TODO LDRSH cant use immediates
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    self.check_arguments(low_registers=(Ra, Rb, Rc))

    def LDRSH_func():
        # TODO does memory read up?
        # Effective address (Rb + Rc) must be half-word (2-byte) aligned.
        if (self.register[Rb] + self.register[Rc]) % 2 != 0:
            raise iarm.exceptions.HardFault(
                "Memory access not half word aligned\nR{}: {}\nR{}: {}".format(Rb, self.register[Rb], Rc, self.register[Rc]))
        self.register[Ra] = 0
        # Assemble the 16-bit value byte by byte, little-endian.
        for i in range(2):
            self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i))
        # Sign-extend: if bit 15 is set, fill the upper half-word with ones.
        if self.register[Ra] & (1 << 15):
            self.register[Ra] |= (0xFFFF << 16)

    return LDRSH_func
|
def remove_config_to_machine_group(self, project_name, config_name, group_name):
    """Remove a logtail config from a machine group.

    Unsuccessful operation will cause an LogException.

    :type project_name: string
    :param project_name: the Project name
    :type config_name: string
    :param config_name: the logtail config name to apply
    :type group_name: string
    :param group_name: the machine group name
    :return: RemoveConfigToMachineGroupResponse
    :raise: LogException
    """
    resource = "/machinegroups/" + group_name + "/configs/" + config_name
    resp, header = self._send("DELETE", project_name, None, resource, {}, {})
    return RemoveConfigToMachineGroupResponse(header, resp)
|
def register_model_resource(self, resource: ModelResource):
    """Method to manually register a :class:`ModelResource` with APISpec.

    Builds OpenAPI operation docs for each registered endpoint of the
    resource (CREATE/DELETE/GET/LIST/PATCH/PUT) and adds the paths to
    ``self.spec``.

    :param resource: the resource class whose endpoints should be documented.
    """
    model_name = resource.Meta.model.__name__
    # One tag per model, described by the model's own docstring.
    self.spec.add_tag({
        'name': model_name,
        'description': resource.Meta.model.__doc__,
    })
    for method in resource.methods():
        key = f'{resource.__name__}.{method}'
        # Skip methods that were never registered as controller endpoints.
        if key not in unchained.controller_bundle.controller_endpoints:
            continue
        docs = {}
        # The resource method name doubles as the HTTP verb except for
        # CREATE (-> post) and LIST (-> get), remapped below.
        http_method = method
        if method == CREATE:
            http_method = 'post'
            docs[http_method] = dict(
                parameters=[{
                    'in': __location_map__['json'],
                    'required': True,
                    'schema': resource.Meta.serializer_create,
                }],
                responses={
                    '201': dict(description=getattr(resource, CREATE).__doc__,
                                schema=resource.Meta.serializer_create),
                },
            )
        elif method == DELETE:
            docs[http_method] = dict(
                parameters=[],
                responses={
                    '204': dict(description=getattr(resource, DELETE).__doc__),
                },
            )
        elif method == GET:
            docs[http_method] = dict(
                parameters=[],
                responses={
                    '200': dict(description=getattr(resource, GET).__doc__,
                                schema=resource.Meta.serializer),
                },
            )
        elif method == LIST:
            http_method = 'get'
            docs[http_method] = dict(
                parameters=[],
                responses={
                    '200': dict(description=getattr(resource, LIST).__doc__,
                                schema=resource.Meta.serializer_many),
                },
            )
        elif method == PATCH:
            docs[http_method] = dict(
                parameters=[{
                    'in': __location_map__['json'],
                    'required': False,
                    'schema': resource.Meta.serializer,
                }],
                responses={
                    '200': dict(description=getattr(resource, PATCH).__doc__,
                                schema=resource.Meta.serializer),
                },
            )
        elif method == PUT:
            docs[http_method] = dict(
                parameters=[{
                    'in': __location_map__['json'],
                    'required': True,
                    'schema': resource.Meta.serializer,
                }],
                responses={
                    '200': dict(description=getattr(resource, PUT).__doc__,
                                schema=resource.Meta.serializer),
                },
            )
        docs[http_method]['tags'] = [model_name]
        # Human-readable summary, e.g. "GET Users" for LIST.
        display_name = title_case(model_name)
        if method == LIST:
            display_name = pluralize(display_name)
        docs[http_method]['summary'] = f'{http_method.upper()} {display_name}'
        routes = unchained.controller_bundle.controller_endpoints[key]
        for route in routes:
            for rule in self.app.url_map.iter_rules(route.endpoint):
                self.spec.add_path(app=self.app, rule=rule, operations=docs, view=route.view_func)
|
def _cache_index(self, dbname, collection, index, cache_for):
    """Add an index to the index cache for ensure_index operations.

    :param dbname: database name the index belongs to.
    :param collection: collection name within that database.
    :param index: index identifier to cache.
    :param cache_for: lifetime of the cache entry, in seconds.
    """
    expire = datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_for)
    with self.__index_cache_lock:
        # setdefault collapses the original three-way branch: missing
        # database and/or collection levels are created on demand, and the
        # expiry is always (re)written -- behavior is identical.
        self.__index_cache.setdefault(dbname, {}).setdefault(collection, {})[index] = expire
|
def _get_description(self, args: Tuple, kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Build the task-description dictionary to be sent to the queue."""
    description = {
        'id': uuid1().hex,
        'args': args,
        'kwargs': kwargs,
        'module': self._module_name,
        'function': self.f.__name__,
        'sender_hostname': socket.gethostname(),
        'sender_pid': os.getpid(),
        'sender_cmd': ' '.join(sys.argv),
        # Seconds precision is enough; truncate the microseconds.
        'sender_timestamp': datetime.utcnow().isoformat()[:19],
    }
    return description
|
def clearCache():
    """Clears any cached data we have stored about specific engine versions."""
    cache_dir = CachedDataManager._cacheDir()
    # Call _cacheDir() once and test truthiness directly; the original's
    # `== True` comparison on the exists() result was redundant.
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
|
def software_fibonacci(n):
    """A normal old python function returning the nth Fibonacci number
    (0, 1, 1, 2, 3, ... for n = 0, 1, 2, 3, 4, ...).
    """
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous
|
def phi_inv(p):
    """phi_inv: inverse of gaussian (normal) CDF.

    Source:
        Handbook of Mathematical Functions
        Dover Books on Mathematics
        Milton Abramowitz and Irene A. Stegun (Editors)
        Formula 26.2.23 (rational approximation, |error| < 4.5e-4).
    """
    def _approx(t):
        # Rational polynomial in t = sqrt(-2 ln q), valid for the upper tail.
        numerator = (0.010328 * t + 0.802853) * t + 2.515517
        denominator = ((0.001308 * t + 0.189269) * t + 1.432788) * t + 1.0
        return t - numerator / denominator
    if p < 0.5:
        # Lower tail: evaluate at p and mirror the sign.
        return -_approx(math.sqrt(-2.0 * math.log(p)))
    return _approx(math.sqrt(-2.0 * math.log(1.0 - p)))
|
def set_weekly(self, interval, *, days_of_week, first_day_of_week, **kwargs):
    """Set to repeat on the given weekdays, every *interval* periods.

    :param int interval: no. of days to repeat at
    :param list[str] days_of_week: list of days of the week to repeat
    :param str first_day_of_week: starting day for a week
    :keyword date start: Start date of repetition (kwargs)
    :keyword date end: End date of repetition (kwargs)
    :keyword int occurrences: no of occurrences (kwargs)
    """
    # Reuse the daily plumbing for interval/start/end/occurrences handling.
    self.set_daily(interval, **kwargs)
    self.__first_day_of_week = first_day_of_week
    self.__days_of_week = set(days_of_week)
|
def set_session_token(self, session_token):
    """Store the session token and record a fresh login time.

    :param str session_token: Session token from request.
    """
    self._login_time = datetime.datetime.now()
    self.session_token = session_token
|
def end_timing(self):
    """End timing of an execution block and report the elapsed time.

    Computes the milliseconds elapsed since ``self._start`` (which is also
    stored in milliseconds) and forwards it, with the associated counter,
    to the callback. No-op when no callback is registered.
    """
    # `is not None` replaces the original `!= None` identity-vs-equality slip.
    if self._callback is not None:
        elapsed = time.perf_counter() * 1000 - self._start
        self._callback.end_timing(self._counter, elapsed)
|
def get_path(self):
    '''Return the selected path; fall back to the root directory ('/')
    when nothing is selected.'''
    model, tree_iter = self.selection.get_selected()
    if tree_iter:
        return model[tree_iter][PATH_COL]
    return '/'
|
def summary_reporter(self):
    """Parse individual MOB Recon reports into a summary report
    (mob_recon_summary.csv in self.reportpath), skipping chromosomal hits.
    """
    logging.info('Creating MOB-recon summary report')
    with open(os.path.join(self.reportpath, 'mob_recon_summary.csv'), 'w') as summary:
        # Header row; implicit string concatenation joins the two literals.
        data = 'Strain,Location,Contig,Incompatibility,IncompatibilityAccession,RelaxaseType,' 'MashNearestNeighbor,MashNeighborDistance\n'
        for sample in self.metadata:
            # Initialise a dictionary to store results for the COWBAT final report
            sample[self.analysistype].pipelineresults = dict()
            for primarykey, results in sample[self.analysistype].report_dict.items():
                # Only process results if they are not calculated to be chromosomal
                if results['cluster_id'] != 'chromosome':
                    # Commas inside values become ';' so the CSV stays valid;
                    # 'nan' values are reported as 'ND' (not determined).
                    data += ','.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND' for result in [sample.name, results['cluster_id'], results['contig_id'].split('|')[1], results['rep_type'], results['rep_type_accession'], results['relaxase_type'], results['mash_nearest_neighbor'], results['mash_neighbor_distance']])
                    data += '\n'
                    # Add the calculated incompatibility to the pipeline results for use in the final COWBAT report
                    sample[self.analysistype].pipelineresults[results['cluster_id']] = ';'.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND' for result in [results['rep_type']])
        summary.write(data)
|
def create(window, root):
    """Create a notification object.

    Args:
        window (:py:class:`BrowserWindow`): Window object this region
            appears in.
        root
            (:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
            WebDriver element object that serves as the root for the
            notification.

    Returns:
        :py:class:`BaseNotification`: Firefox notification.
    """
    # Imported here (as in the original) to avoid a circular import.
    from foxpuppet.windows.browser.notifications import addons
    known = dict(addons.NOTIFICATIONS)
    # Fall back to the generic notification when the id is unknown.
    notification_class = known.get(root.get_property("id"), BaseNotification)
    return notification_class(window, root)
|
def _warn(self, problem, kind=BrotherQLRasterError):
    """Log the warning message *problem*, or raise it as an exception when
    ``self.exception_on_warning`` is set to True.

    :param problem: warning text.
    :param kind: exception class used when raising (default
        ``BrotherQLRasterError``).
    :raises BrotherQLRasterError: or another exception set via *kind*.
    """
    if not self.exception_on_warning:
        logger.warning(problem)
        return
    raise kind(problem)
|
def presence_handler(type_, from_):
    """Register the decorated function as presence stanza handler.

    :param type_: Presence type to listen for
    :type type_: :class:`~.PresenceType`
    :param from_: Sender JIDs to listen for
    :type from_: :class:`aioxmpp.JID` or :data:`None`
    :raise TypeError: if the decorated object is a coroutine function

    .. seealso::
        :meth:`~.StanzaStream.register_presence_callback`
        for more details on the `type_` and `from_` arguments

    .. versionchanged:: 0.9
        This is now based on
        :class:`aioxmpp.dispatcher.SimplePresenceDispatcher`.
    """
    def decorator(f):
        if asyncio.iscoroutinefunction(f):
            raise TypeError("presence_handler must not be a coroutine function")
        spec = aioxmpp.service.HandlerSpec(
            (_apply_presence_handler, (type_, from_)),
            require_deps=(SimplePresenceDispatcher,),
        )
        aioxmpp.service.add_handler_spec(f, spec)
        return f
    return decorator
|
def _validate_arguments(self):
    """Sanitize model parameters.

    When this tensor term is composed of sub-terms, validation is delegated
    to each of them; otherwise the parent implementation is used.

    Returns
    -------
    self
    """
    if self._has_terms():
        # A plain loop replaces the original list comprehension that was
        # used only for its side effects (built a throwaway list).
        for term in self._terms:
            term._validate_arguments()
    else:
        super(TensorTerm, self)._validate_arguments()
    return self
|
def p_InDecrement(p):
    '''InDecrement : INDECREMENT Expression
    | Expression INDECREMENT'''
    # NOTE: the docstring above is the yacc/PLY grammar rule for this
    # production; it is read at runtime by the parser generator and must
    # stay exactly as written.
    from .helper import isString
    if isString(p[1]):
        # Prefix form: p[1] is the ++/-- token, p[2] the operand expression.
        p[0] = InDecrement(p[1], p[2], False)
    else:
        # Postfix form: operand in p[1], operator token in p[2]; the boolean
        # presumably marks the postfix variant -- confirm against
        # InDecrement's constructor signature.
        p[0] = InDecrement(p[2], p[1], True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.