def blastparser(self, report, sample):
    """Parse the BLAST results, and store necessary data in dictionaries in the sample object
    :param report: name of the BLAST output report being parsed
    :param sample: sample object"""
    # Open the sequence profile file as a dictionary
    blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab')
    resultdict = dict()
    # Initialise a dictionary to store all the target sequences
    sample[self.analysistype].targetsequence = dict()
    # Go through each BLAST result
    for row in blastdict:
        # Calculate the percent identity and extract the bitscore from the row
        # Percent identity is the (length of the alignment - number of mismatches) / total subject length
        percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /
                                                 float(row['subject_length']) * 100))
        target = row['subject_id']
        # If the percent identity is greater than the cutoff
        if percentidentity >= self.cutoff:
            # Update the dictionary with the target and percent identity
            resultdict.update({target: percentidentity})
            # Determine if the orientation of the sequence is reversed compared to the reference
            if int(row['subject_end']) < int(row['subject_start']):
                # Create a sequence object using Biopython
                seq = Seq(row['query_sequence'], IUPAC.unambiguous_dna)
                # Calculate the reverse complement of the sequence
                querysequence = str(seq.reverse_complement())
            # If the sequence is not reversed, use the sequence as it is in the output
            else:
                querysequence = row['query_sequence']
            # Add the sequence in the correct orientation to the sample
            sample[self.analysistype].targetsequence[target] = querysequence
    # Add the percent identity to the object
    sample[self.analysistype].blastresults = resultdict
    # Populate missing results with 'NA' values
    if len(resultdict) == 0:
        sample[self.analysistype].blastresults = 'NA'
|
def __field_to_parameter_type_and_format ( self , field ) :
"""Converts the field variant type into a tuple describing the parameter .
Args :
field : An instance of a subclass of messages . Field .
Returns :
A tuple with the type and format of the field , respectively .
Raises :
TypeError : if the field variant is a message variant ."""
|
# We use lowercase values for types ( e . g . ' string ' instead of ' STRING ' ) .
variant = field . variant
if variant == messages . Variant . MESSAGE :
raise TypeError ( 'A message variant cannot be used in a parameter.' )
# Note that the 64 - bit integers are marked as strings - - this is to
# accommodate JavaScript , which would otherwise demote them to 32 - bit
# integers .
return CUSTOM_VARIANT_MAP . get ( variant ) or ( variant . name . lower ( ) , None )
|
def reversed_dotted_parts(s):
    """For a string "a.b.c", yields "a.b.c", "a.b", "a"."""
    idx = -1
    if s:
        yield s
    while s:
        idx = s.rfind('.', 0, idx)
        if idx == -1:
            break
        yield s[:idx]
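# A minimal usage sketch (standalone, no extra dependencies assumed):
# list(reversed_dotted_parts("a.b.c")) -> ["a.b.c", "a.b", "a"]
# list(reversed_dotted_parts(""))      -> []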
|
def initialize ( self , symbolic_vm : LaserEVM ) :
"""Initializes the BenchmarkPlugin
Introduces hooks in symbolic_vm to track the desired values
:param symbolic_vm: Symbolic virtual machine to analyze"""
|
self . _reset ( )
@ symbolic_vm . laser_hook ( "execute_state" )
def execute_state_hook ( _ ) :
current_time = time ( ) - self . begin
self . nr_of_executed_insns += 1
for key , value in symbolic_vm . coverage . items ( ) :
try :
self . coverage [ key ] [ current_time ] = sum ( value [ 1 ] ) * 100 / value [ 0 ]
except KeyError :
self . coverage [ key ] = { }
self . coverage [ key ] [ current_time ] = sum ( value [ 1 ] ) * 100 / value [ 0 ]
@ symbolic_vm . laser_hook ( "start_sym_exec" )
def start_sym_exec_hook ( ) :
self . begin = time ( )
@ symbolic_vm . laser_hook ( "stop_sym_exec" )
def stop_sym_exec_hook ( ) :
self . end = time ( )
self . _write_to_graph ( )
self . _store_report ( )
|
def ensure_time_avg_has_cf_metadata ( ds ) :
"""Add time interval length and bounds coordinates for time avg data .
If the Dataset or DataArray contains time average data , enforce
that there are coordinates that track the lower and upper bounds of
the time intervals , and that there is a coordinate that tracks the
amount of time per time average interval .
CF conventions require that a quantity stored as time averages
over time intervals must have time and time_bounds coordinates [1]_.
aospy further requires AVERAGE_DT for time average data, for accurate
time-weighted averages, which can be inferred from the CF-required
time_bounds coordinate if needed. This step should be done
prior to decoding CF metadata with xarray to ensure proper
computed timedeltas for different calendar types.
.. [1] http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_data_representative_of_cells
Parameters
ds : Dataset or DataArray
Input data
Returns
Dataset or DataArray
Time average metadata attributes added if needed ."""
|
# noqa : E501
if TIME_WEIGHTS_STR not in ds :
time_weights = ds [ TIME_BOUNDS_STR ] . diff ( BOUNDS_STR )
time_weights = time_weights . rename ( TIME_WEIGHTS_STR ) . squeeze ( )
if BOUNDS_STR in time_weights . coords :
time_weights = time_weights . drop ( BOUNDS_STR )
ds [ TIME_WEIGHTS_STR ] = time_weights
raw_start_date = ds [ TIME_BOUNDS_STR ] . isel ( ** { TIME_STR : 0 , BOUNDS_STR : 0 } )
ds [ RAW_START_DATE_STR ] = raw_start_date . reset_coords ( drop = True )
raw_end_date = ds [ TIME_BOUNDS_STR ] . isel ( ** { TIME_STR : - 1 , BOUNDS_STR : 1 } )
ds [ RAW_END_DATE_STR ] = raw_end_date . reset_coords ( drop = True )
for coord in [ TIME_BOUNDS_STR , RAW_START_DATE_STR , RAW_END_DATE_STR ] :
ds [ coord ] . attrs [ 'units' ] = ds [ TIME_STR ] . attrs [ 'units' ]
if 'calendar' in ds [ TIME_STR ] . attrs :
ds [ coord ] . attrs [ 'calendar' ] = ds [ TIME_STR ] . attrs [ 'calendar' ]
unit_interval = ds [ TIME_STR ] . attrs [ 'units' ] . split ( 'since' ) [ 0 ] . strip ( )
ds [ TIME_WEIGHTS_STR ] . attrs [ 'units' ] = unit_interval
return ds
|
def add_all ( self , bucket , quiet = False ) :
"""Ensures the query result is consistent with all prior
mutations performed by a given bucket .
Using this function is equivalent to keeping track of all
mutations performed by the given bucket , and passing them to
: meth : ` ~ add _ result `
: param bucket : A : class : ` ~ couchbase . bucket . Bucket ` object
used for the mutations
: param quiet : If the bucket contains no valid mutations , this
option suppresses throwing exceptions .
: return : ` True ` if at least one mutation was added , ` False ` if none
were added ( and ` quiet ` was specified )
: raise : : exc : ` ~ . MissingTokenError ` if no mutations were added and
` quiet ` was not specified"""
|
added = False
for mt in bucket . _mutinfo ( ) :
added = True
self . _add_scanvec ( mt )
if not added and not quiet :
raise MissingTokenError ( 'Bucket object contains no tokens!' )
return added
|
def createCashContract ( self , symbol , currency = "USD" , exchange = "IDEALPRO" ) :
"""Used for FX , etc :
createCashContract ( " EUR " , currency = " USD " )"""
|
contract_tuple = ( symbol , "CASH" , exchange , currency , "" , 0.0 , "" )
contract = self . createContract ( contract_tuple )
return contract
|
def _upload_part ( api , session , url , upload , part_number , part , retry_count , timeout ) :
"""Used by the worker to upload a part to the storage service .
: param api : Api instance .
: param session : Storage service session .
: param url : Part url .
: param upload : Upload identifier .
: param part _ number : Part number .
: param part : Part data .
: param retry _ count : Number of times to retry .
: param timeout : Timeout for storage session ."""
|
part_url = retry ( retry_count ) ( _get_part_url ) ( api , url , upload , part_number )
e_tag = retry ( retry_count ) ( _submit_part ) ( session , part_url , part , timeout )
retry ( retry_count ) ( _report_part ) ( api , url , upload , part_number , e_tag )
|
def setSize ( self , size , sizeIsEstimated ) :
"""Update size ."""
|
self . _size = size
self . _sizeIsEstimated = sizeIsEstimated
if self . fromVol is not None and size is not None and not sizeIsEstimated :
Diff . theKnownSizes [ self . toUUID ] [ self . fromUUID ] = size
|
def create ( self , component_context , overriding_args ) :
"""Creates a new instance of the component , respecting the scope .
: param component _ context : The context to resolve dependencies from .
: param overriding _ args : Overriding arguments to use ( by name ) instead of resolving them .
: return : An instance of the component ."""
|
return self . component_scope . instance ( lambda : self . _create ( component_context , overriding_args ) )
|
def form_adverb_from_adjective(adjective):
    """Forms an adverb from the input adjective, e.g. "happy" => "happily".
    Adverbs are generated using rules from: http://www.edufind.com/english-grammar/forming-adverbs-adjectives/
    :param adjective: adjective
    :return: adverb form of the input adjective"""
    # If the adjective ends in -able, -ible, or -le, replace the -e with -y
    if adjective.endswith("able") or adjective.endswith("ible") or adjective.endswith("le"):
        return adjective[:-1] + "y"
    # If the adjective ends in -y, replace the y with i and add -ly
    elif adjective.endswith("y"):
        return adjective[:-1] + "ily"
    # If the adjective ends in -ic, add -ally
    elif adjective.endswith("ic"):
        return adjective + "ally"
    # In most cases, an adverb is formed by adding -ly to an adjective
    return adjective + "ly"
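# Illustrative calls following the rules above (a sketch, not exhaustive):
# form_adverb_from_adjective("happy")    -> "happily"
# form_adverb_from_adjective("terrible") -> "terribly"
# form_adverb_from_adjective("quick")    -> "quickly"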
|
def remove_component ( self , entity , component_type ) :
"""Remove the component of component _ type from entity .
Long - hand for : func : ` essence . Entity . remove ` .
: param entity : entity to associate
: type entity : : class : ` essence . Entity `
: param component _ type : Type of component
: type component _ type : The : class : ` type ` of a : class : ` Component ` subclass"""
|
relation = self . _get_relation ( component_type )
del relation [ entity ]
self . _entities_with ( component_type ) . remove ( entity )
|
def get_soup ( page = '' ) :
"""Returns a bs4 object of the page requested"""
|
content = requests . get ( '%s/%s' % ( BASE_URL , page ) ) . text
return BeautifulSoup ( content )
|
def _OpenFileObject ( self , path_spec ) :
"""Opens the file - like object defined by path specification .
Args :
path _ spec ( PathSpec ) : path specification .
Returns :
FileIO : a file - like object .
Raises :
PathSpecError : if the path specification is incorrect ."""
|
if not path_spec . HasParent ( ) :
raise errors . PathSpecError ( 'Unsupported path specification without parent.' )
resolver . Resolver . key_chain . ExtractCredentialsFromPathSpec ( path_spec )
file_object = resolver . Resolver . OpenFileObject ( path_spec . parent , resolver_context = self . _resolver_context )
fvde_volume = pyfvde . volume ( )
fvde . FVDEVolumeOpen ( fvde_volume , path_spec , file_object , resolver . Resolver . key_chain )
return fvde_volume
|
def server_receives_binary_from ( self , name = None , timeout = None , connection = None , label = None ) :
"""Receive raw binary message . Returns message , ip , and port .
If server ` name ` is not given , uses the latest server . Optional message
` label ` is shown on logs .
Examples :
| $ { binary } | $ { ip } | $ { port } = | Server receives binary from |
| $ { binary } | $ { ip } | $ { port } = | Server receives binary from | Server1 | connection = my _ connection | timeout = 5 |"""
|
server , name = self . _servers . get_with_name ( name )
msg , ip , port = server . receive_from ( timeout = timeout , alias = connection )
self . _register_receive ( server , label , name , connection = connection )
return msg , ip , port
|
def accept ( self , context ) :
"""Check if the context could be accepted by the oracle
Args :
context : s sequence same type as the oracle data
Returns :
bAccepted : whether the sequence is accepted or not
_ next : the state where the sequence is accepted"""
|
_next = 0
for _s in context :
_data = [ self . data [ j ] for j in self . trn [ _next ] ]
if _s in _data :
_next = self . trn [ _next ] [ _data . index ( _s ) ]
else :
return 0 , _next
return 1 , _next
|
def open ( self ) :
"""Open a connection to the device ."""
|
device_type = 'cisco_ios'
if self . transport == 'telnet' :
device_type = 'cisco_ios_telnet'
self . device = ConnectHandler ( device_type = device_type , host = self . hostname , username = self . username , password = self . password , ** self . netmiko_optional_args )
# ensure in enable mode
self . device . enable ( )
|
def _surfdens ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ surfdens
PURPOSE :
evaluate the surface density for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
the surface density
HISTORY :
2018-08-19 - Written - Bovy ( UofT )"""
|
return 2. * integrate . quad ( lambda x : self . _dens ( R , x , phi = phi , t = t ) , 0 , z ) [ 0 ]
|
def delete_share ( self , share_name , fail_not_exist = False , timeout = None , snapshot = None , delete_snapshots = None ) :
'''Marks the specified share for deletion . If the share
does not exist , the operation fails on the service . By
default , the exception is swallowed by the client .
To expose the exception , specify True for fail _ not _ exist .
: param str share _ name :
Name of share to delete .
: param bool fail _ not _ exist :
Specify whether to throw an exception when the share doesn ' t
exist . False by default .
: param int timeout :
The timeout parameter is expressed in seconds .
: param str snapshot :
A string that represents the snapshot version , if applicable .
Specify this argument to delete a specific snapshot only .
delete _ snapshots must be None if this is specified .
: param ~ azure . storage . file . models . DeleteSnapshot delete _ snapshots :
To delete a share that has snapshots , this must be specified as DeleteSnapshot . Include .
: return : True if share is deleted , False share doesn ' t exist .
: rtype : bool'''
|
_validate_not_none ( 'share_name' , share_name )
request = HTTPRequest ( )
request . method = 'DELETE'
request . host_locations = self . _get_host_locations ( )
request . path = _get_path ( share_name )
request . headers = { 'x-ms-delete-snapshots' : _to_str ( delete_snapshots ) }
request . query = { 'restype' : 'share' , 'timeout' : _int_to_str ( timeout ) , 'sharesnapshot' : _to_str ( snapshot ) , }
if not fail_not_exist :
try :
self . _perform_request ( request , expected_errors = [ _SHARE_NOT_FOUND_ERROR_CODE ] )
return True
except AzureHttpError as ex :
_dont_fail_not_exist ( ex )
return False
else :
self . _perform_request ( request )
return True
|
def batch_length(batch):
    '''Determine the number of samples in a batch.
    Parameters
    ----------
    batch : dict
        A batch dictionary. Each value must implement `len`.
        All values must have the same `len`.
    Returns
    -------
    n : int >= 0 or None
        The number of samples in this batch.
        If the batch has no fields, n is None.
    Raises
    ------
    PescadorError
        If some two values have unequal length'''
    n = None
    for value in six.itervalues(batch):
        if n is None:
            n = len(value)
        elif len(value) != n:
            raise PescadorError('Unequal field lengths')
    return n
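# Minimal usage sketch (assumes numpy for the array-valued fields):
import numpy as np

batch = {'X': np.zeros((8, 3)), 'Y': np.zeros(8)}
assert batch_length(batch) == 8      # both fields have length 8
assert batch_length({}) is None      # no fields -> None
# batch_length({'X': np.zeros(8), 'Y': np.zeros(4)}) raises PescadorError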
|
def string_to_data_time(d):
    '''simple parse date string, such as:
    2016-5-27 21:22:20
    2016-05-27 21:22:2
    2016/05/27 21:22:2
    2016-05-27
    2016/5/27
    21:22:2'''
    if d:
        d = d.replace('/', '-')
        if ' ' in d:
            _datetime = d.split(' ')
            if len(_datetime) == 2:
                _d = _string_to_date(_datetime[0])
                _t = _string_to_time(_datetime[1])
                return _combine_date_time(_d, _t)
        else:  # no space
            if '-' in d:
                return date_to_datetime(_string_to_date(d))
            elif ':' in d:
                return time_to_datetime(_string_to_time(d))
    return None
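# Usage sketch: the helpers (_string_to_date, _string_to_time, ...) are
# module-internal, so only the public call is shown; return values are assumed
# to be datetime objects built by those helpers.
# string_to_data_time('2016/05/27 21:22:2') -> datetime(2016, 5, 27, 21, 22, 2)
# string_to_data_time('2016-05-27')         -> datetime(2016, 5, 27, 0, 0)
# string_to_data_time('not a date at all')  -> None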
|
def get ( self , name , ** kwargs ) :
"""Get the variable given a name if one exists or create a new one if missing .
Parameters
name : str
name of the variable
* * kwargs :
more arguments that ' s passed to symbol . Variable"""
|
name = self . _prefix + name
if name not in self . _params :
self . _params [ name ] = symbol . Variable ( name , ** kwargs )
return self . _params [ name ]
|
def _activate_texture ( mesh , name ) :
"""Grab a texture and update the active texture coordinates . This makes
sure to not destroy old texture coordinates
Parameters
name : str
The name of the texture and texture coordinates to activate
Return
vtk . vtkTexture : The active texture"""
|
if name == True or isinstance ( name , int ) :
keys = list ( mesh . textures . keys ( ) )
# Grab the first name availabe if True
idx = 0 if not isinstance ( name , int ) or name == True else name
if idx > len ( keys ) :
idx = 0
try :
name = keys [ idx ]
except IndexError :
logging . warning ( 'No textures associated with input mesh.' )
return None
# Grab the texture object by name
try :
texture = mesh . textures [ name ]
except KeyError :
logging . warning ( 'Texture ({}) not associated with this dataset' . format ( name ) )
texture = None
else : # Be sure to reset the tcoords if present
# Grab old coordinates
if name in mesh . scalar_names :
old_tcoord = mesh . GetPointData ( ) . GetTCoords ( )
mesh . GetPointData ( ) . SetTCoords ( mesh . GetPointData ( ) . GetArray ( name ) )
mesh . GetPointData ( ) . AddArray ( old_tcoord )
mesh . Modified ( )
return texture
|
def atexit_register(func):
    """Uses either uwsgi's atexit mechanism, or atexit from the stdlib.
    When running under uwsgi, using their atexit handler is more reliable,
    especially when using gevent
    :param func: the function to call at exit"""
    try:
        import uwsgi
        orig = getattr(uwsgi, "atexit", None)

        def uwsgi_atexit():
            if callable(orig):
                orig()
            func()

        uwsgi.atexit = uwsgi_atexit
    except ImportError:
        atexit.register(func)
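# Minimal usage sketch (outside uwsgi this simply falls back to atexit.register;
# _flush_metrics is a hypothetical cleanup callback):
def _flush_metrics():
    print("flushing metrics before exit")

atexit_register(_flush_metrics)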
|
def update_access_key ( self , access_key_id , status , user_name = None ) :
"""Changes the status of the specified access key from Active to Inactive
or vice versa . This action can be used to disable a user ' s key as
part of a key rotation workflow .
If the user _ name is not specified , the user _ name is determined
implicitly based on the AWS Access Key ID used to sign the request .
: type access _ key _ id : string
: param access _ key _ id : The ID of the access key .
: type status : string
: param status : Either Active or Inactive .
: type user _ name : string
: param user _ name : The username of user ( optional ) ."""
|
params = { 'AccessKeyId' : access_key_id , 'Status' : status }
if user_name :
params [ 'UserName' ] = user_name
return self . get_response ( 'UpdateAccessKey' , params )
|
def _create_technical_words_dictionary ( spellchecker_cache_path , relative_path , user_words , shadow ) :
"""Create Dictionary at spellchecker _ cache _ path with technical words ."""
|
technical_terms_set = ( user_words | technical_words_from_shadow_contents ( shadow ) )
technical_words = Dictionary ( technical_terms_set , "technical_words_" + relative_path . replace ( os . path . sep , "_" ) , [ os . path . realpath ( relative_path ) ] , spellchecker_cache_path )
return technical_words
|
def get_directory(self, identifier):
    """Implements the policy for naming directories for image objects. Image
    object directories are named by their identifier. In addition, these
    directories are grouped in parent directories named by the first two
    characters of the identifier. The aim is to avoid having too many
    sub-folders in a single directory.
    Parameters
    ----------
    identifier : string
        Unique object identifier
    Returns
    -------
    string
        Path to the image object's data directory"""
    return os.path.join(os.path.join(self.directory, identifier[:2]), identifier)
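# Sketch of the two-character fan-out, assuming self.directory is '/data/images':
# store.get_directory('4f2a9c') -> '/data/images/4f/4f2a9c'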
|
def is_dirty ( self , index = True , working_tree = True , untracked_files = False , submodules = True , path = None ) :
""": return :
` ` True ` ` , the repository is considered dirty . By default it will react
like a git - status without untracked files , hence it is dirty if the
index or the working copy have changes ."""
|
if self . _bare : # Bare repositories with no associated working directory are
# always consired to be clean .
return False
# start from the one which is fastest to evaluate
default_args = [ '--abbrev=40' , '--full-index' , '--raw' ]
if not submodules :
default_args . append ( '--ignore-submodules' )
if path :
default_args . append ( path )
if index : # diff index against HEAD
if osp . isfile ( self . index . path ) and len ( self . git . diff ( '--cached' , * default_args ) ) :
return True
# END index handling
if working_tree : # diff index against working tree
if len ( self . git . diff ( * default_args ) ) :
return True
# END working tree handling
if untracked_files :
if len ( self . _get_untracked_files ( path , ignore_submodules = not submodules ) ) :
return True
# END untracked files
return False
|
def read_body ( self , request , response , file = None , raw = False ) :
'''Read the response ' s content body .
Coroutine .'''
|
if is_no_body ( request , response ) :
return
if not raw :
self . _setup_decompressor ( response )
read_strategy = self . get_read_strategy ( response )
if self . _ignore_length and read_strategy == 'length' :
read_strategy = 'close'
if read_strategy == 'chunked' :
yield from self . _read_body_by_chunk ( response , file , raw = raw )
elif read_strategy == 'length' :
yield from self . _read_body_by_length ( response , file )
else :
yield from self . _read_body_until_close ( response , file )
should_close = wpull . protocol . http . util . should_close ( request . version , response . fields . get ( 'Connection' ) )
if not self . _keep_alive or should_close :
_logger . debug ( 'Not keep-alive. Closing connection.' )
self . close ( )
|
def _get_timestamp ( dirname_full , remove ) :
"""Get the timestamp from the timestamp file .
Optionally mark it for removal if we ' re going to write another one ."""
|
record_filename = os . path . join ( dirname_full , RECORD_FILENAME )
if not os . path . exists ( record_filename ) :
return None
mtime = os . stat ( record_filename ) . st_mtime
mtime_str = datetime . fromtimestamp ( mtime )
print ( 'Found timestamp {}:{}' . format ( dirname_full , mtime_str ) )
if Settings . record_timestamp and remove :
OLD_TIMESTAMPS . add ( record_filename )
return mtime
|
def squared_toroidal_dist(p1, p2, world_size=(60, 60)):
    """Separated out because sqrt has a lot of overhead"""
    halfx = world_size[0] / 2.0
    if world_size[0] == world_size[1]:
        halfy = halfx
    else:
        halfy = world_size[1] / 2.0
    deltax = p1[0] - p2[0]
    if deltax < -halfx:
        deltax += world_size[0]
    elif deltax > halfx:
        deltax -= world_size[0]
    deltay = p1[1] - p2[1]
    if deltay < -halfy:
        deltay += world_size[1]
    elif deltay > halfy:
        deltay -= world_size[1]
    return deltax * deltax + deltay * deltay
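# Wraparound example on the default 60x60 torus: points near opposite edges are
# close once coordinates wrap, so the squared distance stays small.
# squared_toroidal_dist((1, 1), (59, 1))    -> 4   (dx wraps from -58 to 2)
# squared_toroidal_dist((10, 10), (13, 14)) -> 25  (no wrapping needed)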
|
def force_utc(time, name='field', precision=6):
    """Appending 'Z' to isoformatted time - explicit timezone is required for most APIs"""
    if not isinstance(time, datetime.datetime):
        raise CloudValueError("%s should be of type datetime" % (name,))
    clip = 6 - precision
    timestring = time.isoformat()
    if clip:
        timestring = timestring[:-clip]
    return timestring + "Z"
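# Example outputs (CloudValueError is defined in the surrounding package):
# force_utc(datetime.datetime(2020, 1, 2, 3, 4, 5, 123456))
#     -> '2020-01-02T03:04:05.123456Z'
# force_utc(datetime.datetime(2020, 1, 2, 3, 4, 5, 123456), precision=3)
#     -> '2020-01-02T03:04:05.123Z'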
|
def getPrevUrl ( self , url , data ) :
"""Find previous URL ."""
|
prevUrl = None
if self . prevSearch :
try :
prevUrl = self . fetchUrl ( url , data , self . prevSearch )
except ValueError as msg : # assume there is no previous URL , but print a warning
out . warn ( u"%s Assuming no previous comic strips exist." % msg )
else :
prevUrl = self . prevUrlModifier ( prevUrl )
out . debug ( u"Found previous URL %s" % prevUrl )
getHandler ( ) . comicPageLink ( self . getName ( ) , url , prevUrl )
return prevUrl
|
def execute ( self , command , * args , ** kw ) :
"""Executes redis command in a free connection and returns
future waiting for result .
Picks connection from free pool and send command through
that connection .
If no connection is found , returns coroutine waiting for
free connection to execute command ."""
|
conn , address = self . get_connection ( command , args )
if conn is not None :
fut = conn . execute ( command , * args , ** kw )
return self . _check_result ( fut , command , args , kw )
else :
coro = self . _wait_execute ( address , command , args , kw )
return self . _check_result ( coro , command , args , kw )
|
def wrap_command(cmds, data_dirs, cls, strict=True):
    """Wrap a setup command
    Parameters
    ----------
    cmds : list(str)
        The names of the other commands to run prior to the command.
    strict : boolean, optional
        Whether to raise errors when a pre-command fails."""
    class WrappedCommand(cls):

        def run(self):
            if not getattr(self, 'uninstall', None):
                try:
                    [self.run_command(cmd) for cmd in cmds]
                except Exception:
                    if strict:
                        raise
                    else:
                        pass
            result = cls.run(self)
            data_files = []
            for dname in data_dirs:
                data_files.extend(get_data_files(dname))
            # update data-files in case this created new files
            self.distribution.data_files = data_files
            # also update package data
            update_package_data(self.distribution)
            return result

    return WrappedCommand
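# Usage sketch in a setup.py, assuming a custom 'build_js' command and a
# 'myproject/static' data directory exist (both names are illustrative):
# from setuptools.command.build_py import build_py
# setup(
#     ...,
#     cmdclass={'build_py': wrap_command(['build_js'], ['myproject/static'], build_py)},
# )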
|
def tokeniter ( self , source , name , filename = None , state = None ) :
"""This method tokenizes the text and returns the tokens in a
generator . Use this method if you just want to tokenize a template ."""
|
source = '\n' . join ( unicode ( source ) . splitlines ( ) )
pos = 0
lineno = 1
stack = [ 'root' ]
if state is not None and state != 'root' :
assert state in ( 'variable' , 'block' ) , 'invalid state'
stack . append ( state + '_begin' )
else :
state = 'root'
statetokens = self . rules [ stack [ - 1 ] ]
source_length = len ( source )
balancing_stack = [ ]
while 1 : # tokenizer loop
for regex , tokens , new_state in statetokens :
m = regex . match ( source , pos )
# if no match we try again with the next rule
if m is None :
continue
# we only match blocks and variables if brances / parentheses
# are balanced . continue parsing with the lower rule which
# is the operator rule . do this only if the end tags look
# like operators
if balancing_stack and tokens in ( 'variable_end' , 'block_end' , 'linestatement_end' ) :
continue
# tuples support more options
if isinstance ( tokens , tuple ) :
for idx , token in enumerate ( tokens ) : # failure group
if token . __class__ is Failure :
raise token ( lineno , filename )
# bygroup is a bit more complex , in that case we
# yield for the current token the first named
# group that matched
elif token == '#bygroup' :
for key , value in m . groupdict ( ) . iteritems ( ) :
if value is not None :
yield lineno , key , value
lineno += value . count ( '\n' )
break
else :
raise RuntimeError ( '%r wanted to resolve ' 'the token dynamically' ' but no group matched' % regex )
# normal group
else :
data = m . group ( idx + 1 )
if data or token not in ignore_if_empty :
yield lineno , token , data
lineno += data . count ( '\n' )
# strings as token just are yielded as it .
else :
data = m . group ( )
# update brace / parentheses balance
if tokens == 'operator' :
if data == '{' :
balancing_stack . append ( '}' )
elif data == '(' :
balancing_stack . append ( ')' )
elif data == '[' :
balancing_stack . append ( ']' )
elif data in ( '}' , ')' , ']' ) :
if not balancing_stack :
raise TemplateSyntaxError ( 'unexpected \'%s\'' % data , lineno , name , filename )
expected_op = balancing_stack . pop ( )
if expected_op != data :
raise TemplateSyntaxError ( 'unexpected \'%s\', ' 'expected \'%s\'' % ( data , expected_op ) , lineno , name , filename )
# yield items
if data or tokens not in ignore_if_empty :
yield lineno , tokens , data
lineno += data . count ( '\n' )
# fetch new position into new variable so that we can check
# if there is a internal parsing error which would result
# in an infinite loop
pos2 = m . end ( )
# handle state changes
if new_state is not None : # remove the uppermost state
if new_state == '#pop' :
stack . pop ( )
# resolve the new state by group checking
elif new_state == '#bygroup' :
for key , value in m . groupdict ( ) . iteritems ( ) :
if value is not None :
stack . append ( key )
break
else :
raise RuntimeError ( '%r wanted to resolve the ' 'new state dynamically but' ' no group matched' % regex )
# direct state name given
else :
stack . append ( new_state )
statetokens = self . rules [ stack [ - 1 ] ]
# we are still at the same position and no stack change .
# this means a loop without break condition , avoid that and
# raise error
elif pos2 == pos :
raise RuntimeError ( '%r yielded empty string without ' 'stack change' % regex )
# publish new function and start again
pos = pos2
break
# if loop terminated without break we havn ' t found a single match
# either we are at the end of the file or we have a problem
else : # end of text
if pos >= source_length :
return
# something went wrong
raise TemplateSyntaxError ( 'unexpected char %r at %d' % ( source [ pos ] , pos ) , lineno , name , filename )
|
def bdd_common_after_all ( context_or_world ) :
"""Common after all method in behave or lettuce
: param context _ or _ world : behave context or lettuce world"""
|
# Close drivers
DriverWrappersPool . close_drivers ( scope = 'session' , test_name = 'multiple_tests' , test_passed = context_or_world . global_status [ 'test_passed' ] )
# Update tests status in Jira
change_all_jira_status ( )
|
def getoptlist(self, p):
    """Returns all option values stored that match p as a list."""
    optlist = []
    for k, v in self.pairs:
        if k == p:
            optlist.append(v)
    return optlist
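# Behaviour sketch, assuming self.pairs holds (key, value) tuples:
# with pairs = [('include', 'a.h'), ('include', 'b.h'), ('define', 'X')]
# getoptlist('include') -> ['a.h', 'b.h']
# getoptlist('missing') -> []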
|
def output ( self , output , status = None ) :
"""Output text to stdout or a pager command .
The status text is not outputted to pager or files .
The message will be logged in the audit log , if enabled . The
message will be written to the tee file , if enabled . The
message will be written to the output file , if enabled ."""
|
if output :
size = self . cli . output . get_size ( )
margin = self . get_output_margin ( status )
fits = True
buf = [ ]
output_via_pager = self . explicit_pager and special . is_pager_enabled ( )
for i , line in enumerate ( output , 1 ) :
special . write_tee ( line )
special . write_once ( line )
if fits or output_via_pager : # buffering
buf . append ( line )
if len ( line ) > size . columns or i > ( size . rows - margin ) :
fits = False
if not self . explicit_pager and special . is_pager_enabled ( ) : # doesn ' t fit , use pager
output_via_pager = True
if not output_via_pager : # doesn ' t fit , flush buffer
for line in buf :
click . secho ( line )
buf = [ ]
else :
click . secho ( line )
if buf :
if output_via_pager : # sadly click . echo _ via _ pager doesn ' t accept generators
click . echo_via_pager ( "\n" . join ( buf ) )
else :
for line in buf :
click . secho ( line )
if status :
click . secho ( status )
|
def get_plugin_modules ( folders , package = 'plugins' , parentpackage = 'linkcheck.dummy' ) :
"""Get plugin modules for given folders ."""
|
for folder in folders :
for module in loader . get_folder_modules ( folder , parentpackage ) :
yield module
for module in loader . get_package_modules ( package ) :
yield module
|
def pipeline ( self , source = None , phase = 'build' , ps = None ) :
"""Construct the ETL pipeline for all phases . Segments that are not used for the current phase
are filtered out later .
: param source : A source object , or a source string name
: return : an etl Pipeline"""
|
from ambry . etl . pipeline import Pipeline , PartitionWriter
from ambry . dbexceptions import ConfigurationError
if source :
source = self . source ( source ) if isinstance ( source , string_types ) else source
else :
source = None
sf , sp = self . source_pipe ( source , ps ) if source else ( None , None )
pl = Pipeline ( self , source = sp )
# Get the default pipeline , from the config at the head of this file .
try :
phase_config = self . default_pipelines [ phase ]
except KeyError :
phase_config = None
# Ok for non - conventional pipe names
if phase_config :
pl . configure ( phase_config )
# Find the pipe configuration , from the metadata
pipe_config = None
pipe_name = None
if source and source . pipeline :
pipe_name = source . pipeline
try :
pipe_config = self . metadata . pipelines [ pipe_name ]
except KeyError :
raise ConfigurationError ( "Pipeline '{}' declared in source '{}', but not found in metadata" . format ( source . pipeline , source . name ) )
else :
pipe_name , pipe_config = self . _find_pipeline ( source , phase )
if pipe_name :
pl . name = pipe_name
else :
pl . name = phase
pl . phase = phase
# The pipe _ config can either be a list , in which case it is a list of pipe pipes for the
# augment segment or it could be a dict , in which case each is a list of pipes
# for the named segments .
def apply_config ( pl , pipe_config ) :
if isinstance ( pipe_config , ( list , tuple ) ) : # Just convert it to dict form for the next section
# PartitionWriters are always moved to the ' store ' section
store , body = [ ] , [ ]
for pipe in pipe_config :
store . append ( pipe ) if isinstance ( pipe , PartitionWriter ) else body . append ( pipe )
pipe_config = dict ( body = body , store = store )
if pipe_config :
pl . configure ( pipe_config )
apply_config ( pl , pipe_config )
# One more time , for the configuration for ' all ' phases
if 'all' in self . metadata . pipelines :
apply_config ( pl , self . metadata . pipelines [ 'all' ] )
# Allows developer to over ride pipe configuration in code
self . edit_pipeline ( pl )
try :
pl . dest_table = source . dest_table_name
pl . source_table = source . source_table . name
pl . source_name = source . name
except AttributeError :
pl . dest_table = None
return pl
|
def minimize ( self , minimize ) :
'''Configures the ABC to minimize fitness function return value or
derived score
Args :
minimize ( bool ) : if True , minimizes fitness function return value ;
if False , minimizes derived score'''
|
self . _minimize = minimize
self . _logger . log ( 'debug' , 'Minimize set to {}' . format ( minimize ) )
|
def roc_auc_xlim ( x_bla , y_bla , xlim = 0.1 ) :
"""Computes the ROC Area Under Curve until a certain FPR value .
Parameters
fg _ vals : array _ like
list of values for positive set
bg _ vals : array _ like
list of values for negative set
xlim : float , optional
FPR value
Returns
score : float
ROC AUC score"""
|
x = x_bla [ : ]
y = y_bla [ : ]
x . sort ( )
y . sort ( )
u = { }
for i in x + y :
u [ i ] = 1
vals = sorted ( u . keys ( ) )
len_x = float ( len ( x ) )
len_y = float ( len ( y ) )
new_x = [ ]
new_y = [ ]
x_p = 0
y_p = 0
for val in vals [ : : - 1 ] :
while len ( x ) > 0 and x [ - 1 ] >= val :
x . pop ( )
x_p += 1
while len ( y ) > 0 and y [ - 1 ] >= val :
y . pop ( )
y_p += 1
new_y . append ( ( len_x - x_p ) / len_x )
new_x . append ( ( len_y - y_p ) / len_y )
# print new _ x
# print new _ y
new_x = 1 - np . array ( new_x )
new_y = 1 - np . array ( new_y )
# plot ( new _ x , new _ y )
# show ( )
x = new_x
y = new_y
if len ( x ) != len ( y ) :
raise ValueError ( "Unequal!" )
if not xlim :
xlim = 1.0
auc = 0.0
bla = zip ( stats . rankdata ( x ) , range ( len ( x ) ) )
bla = sorted ( bla , key = lambda x : x [ 1 ] )
prev_x = x [ bla [ 0 ] [ 1 ] ]
prev_y = y [ bla [ 0 ] [ 1 ] ]
index = 1
while index < len ( bla ) and x [ bla [ index ] [ 1 ] ] <= xlim :
_ , i = bla [ index ]
auc += y [ i ] * ( x [ i ] - prev_x ) - ( ( x [ i ] - prev_x ) * ( y [ i ] - prev_y ) / 2.0 )
prev_x = x [ i ]
prev_y = y [ i ]
index += 1
if index < len ( bla ) :
( rank , i ) = bla [ index ]
auc += prev_y * ( xlim - prev_x ) + ( ( y [ i ] - prev_y ) / ( x [ i ] - prev_x ) * ( xlim - prev_x ) * ( xlim - prev_x ) / 2 )
return auc
|
def _subtract_timedelta ( self , delta ) :
"""Remove timedelta duration from the instance .
: param delta : The timedelta instance
: type delta : pendulum . Duration or datetime . timedelta
: rtype : DateTime"""
|
if isinstance ( delta , pendulum . Duration ) :
return self . subtract ( years = delta . years , months = delta . months , weeks = delta . weeks , days = delta . remaining_days , hours = delta . hours , minutes = delta . minutes , seconds = delta . remaining_seconds , microseconds = delta . microseconds , )
return self . subtract ( days = delta . days , seconds = delta . seconds , microseconds = delta . microseconds )
|
def get_widget_from_id(id):
    """Returns a widget object by id,
    for example: web-htmltextwidget-2-2"""
    res = id.split('-')
    try:
        model_cls = apps.get_model(res[0], res[1])
        obj = model_cls.objects.get(parent=res[2], id=res[3])
    except Exception:
        obj = None
    return obj
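# The id encodes "<app_label>-<model_name>-<parent_pk>-<widget_pk>", so for example:
# get_widget_from_id('web-htmltextwidget-2-2')
#     -> the htmltextwidget instance with parent=2 and id=2, or None on any failure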
|
def apply_hds_obs ( hds_file ) :
"""process a modflow head save file . A companion function to
setup _ hds _ obs that is called during the forward run process
Parameters
hds _ file : str
a modflow head save filename . if hds _ file ends with ' ucn ' ,
then the file is treated as a UcnFile type .
Note
requires flopy
writes < hds _ file > . dat
expects < hds _ file > . dat . ins to exist
uses pyemu . pst _ utils . parse _ ins _ file to get observation names"""
|
try :
import flopy
except Exception as e :
raise Exception ( "apply_hds_obs(): error importing flopy: {0}" . format ( str ( e ) ) )
from . . import pst_utils
assert os . path . exists ( hds_file )
out_file = hds_file + ".dat"
ins_file = out_file + ".ins"
assert os . path . exists ( ins_file )
df = pd . DataFrame ( { "obsnme" : pst_utils . parse_ins_file ( ins_file ) } )
df . index = df . obsnme
# populate metdata
items = [ "k" , "i" , "j" , "kper" ]
for i , item in enumerate ( items ) :
df . loc [ : , item ] = df . obsnme . apply ( lambda x : int ( x . split ( '_' ) [ i + 1 ] ) )
if hds_file . lower ( ) . endswith ( 'ucn' ) :
hds = flopy . utils . UcnFile ( hds_file )
else :
hds = flopy . utils . HeadFile ( hds_file )
kpers = df . kper . unique ( )
df . loc [ : , "obsval" ] = np . NaN
for kper in kpers :
kstp = last_kstp_from_kper ( hds , kper )
data = hds . get_data ( kstpkper = ( kstp , kper ) )
# jwhite 15jan2018 fix for really large values that are getting some
# trash added to them . . .
data [ np . isnan ( data ) ] = 0.0
data [ data > 1.0e+20 ] = 1.0e+20
data [ data < - 1.0e+20 ] = - 1.0e+20
df_kper = df . loc [ df . kper == kper , : ]
df . loc [ df_kper . index , "obsval" ] = data [ df_kper . k , df_kper . i , df_kper . j ]
assert df . dropna ( ) . shape [ 0 ] == df . shape [ 0 ]
df . loc [ : , [ "obsnme" , "obsval" ] ] . to_csv ( out_file , index = False , sep = " " )
|
def _round ( self ) :
"""This is the environment implementation of
: meth : ` BaseAnchor . round ` .
Subclasses may override this method ."""
|
self . x = normalizers . normalizeRounding ( self . x )
self . y = normalizers . normalizeRounding ( self . y )
|
def find_root_tex_document ( base_dir = "." ) :
"""Find the tex article in the current directory that can be considered
a root. We do this by searching contents for ``'\documentclass'``.
Parameters
base _ dir : str
Directory to search for LaTeX documents , relative to the current
working directory .
Returns
tex _ path : str
Path to the root tex document relative to the current
working directory ."""
|
log = logging . getLogger ( __name__ )
for tex_path in iter_tex_documents ( base_dir = base_dir ) :
with codecs . open ( tex_path , 'r' , encoding = 'utf-8' ) as f :
text = f . read ( )
if len ( docclass_pattern . findall ( text ) ) > 0 :
log . debug ( "Found root tex {0}" . format ( tex_path ) )
return tex_path
log . warning ( "Could not find a root .tex file" )
raise RootNotFound
|
def _create_results_summary ( self ) :
"""Create the dataframe that displays the estimation results , and store
it on the model instance .
Returns
None ."""
|
# Make sure we have all attributes needed to create the results summary
needed_attributes = [ "params" , "standard_errors" , "tvalues" , "pvalues" , "robust_std_errs" , "robust_t_stats" , "robust_p_vals" ]
try :
assert all ( [ hasattr ( self , attr ) for attr in needed_attributes ] )
assert all ( [ isinstance ( getattr ( self , attr ) , pd . Series ) for attr in needed_attributes ] )
except AssertionError :
msg = "Call this function only after setting/calculating all other"
msg_2 = " estimation results attributes"
raise NotImplementedError ( msg + msg_2 )
self . summary = pd . concat ( ( self . params , self . standard_errors , self . tvalues , self . pvalues , self . robust_std_errs , self . robust_t_stats , self . robust_p_vals ) , axis = 1 )
return None
|
def preprocess ( self , nb : "NotebookNode" , resources : dict ) -> Tuple [ "NotebookNode" , dict ] :
"""Preprocess the entire Notebook ."""
|
exam_num = resources [ "exam_num" ]
time = resources [ "time" ]
date = resources [ "date" ]
nb . cells . insert ( 0 , new_markdown_cell ( source = "---" ) )
nb . cells . insert ( 0 , new_markdown_cell ( source = "" ) )
nb . cells . insert ( 0 , exam_instructions_cell )
first_cell_source = ( "# ME 2233: Thermodynamic Principles\n\n" f"# Exam {exam_num} - {time}\n\n# {date}" )
nb . cells . insert ( 0 , new_markdown_cell ( source = first_cell_source ) )
return nb , resources
|
def extract_event_info ( dstore , eidx ) :
"""Extract information about the given event index .
Example :
http://127.0.0.1:8800/v1/calc/30/extract/event_info/0"""
|
event = dstore [ 'events' ] [ int ( eidx ) ]
serial = int ( event [ 'eid' ] // TWO32 )
ridx = list ( dstore [ 'ruptures' ] [ 'serial' ] ) . index ( serial )
[ getter ] = getters . gen_rupture_getters ( dstore , slice ( ridx , ridx + 1 ) )
rupdict = getter . get_rupdict ( )
rlzi = event [ 'rlz' ]
rlzs_assoc = dstore [ 'csm_info' ] . get_rlzs_assoc ( )
gsim = rlzs_assoc . gsim_by_trt [ rlzi ] [ rupdict [ 'trt' ] ]
for key , val in rupdict . items ( ) :
yield key , val
yield 'rlzi' , rlzi
yield 'gsim' , repr ( gsim )
|
def create_lzma ( archive , compression , cmd , verbosity , interactive , filenames ) :
"""Create an LZMA archive with the lzma Python module ."""
|
return _create ( archive , compression , cmd , 'alone' , verbosity , filenames )
|
def add_converter ( cls , klass , conv , score = 0 ) :
"""Add converter
: param klass : class or str
: param conv : callable
: param score :
: return :"""
|
if isinstance ( klass , str ) :
klass = import_name ( klass )
item = klass , conv , score
cls . converters . append ( item )
cls . converters . sort ( key = lambda x : x [ 0 ] )
return cls
|
def normalize ( self ) :
"""Reduce trivial AVM conjunctions to just the AVM .
For example , in ` [ ATTR1 [ ATTR2 val ] ] ` the value of ` ATTR1 `
could be a conjunction with the sub - AVM ` [ ATTR2 val ] ` . This
method removes the conjunction so the sub - AVM nests directly
( equivalent to ` [ ATTR1 . ATTR2 val ] ` in TDL ) ."""
|
for attr in self . _avm :
val = self . _avm [ attr ]
if isinstance ( val , Conjunction ) :
val . normalize ( )
if len ( val . terms ) == 1 and isinstance ( val . terms [ 0 ] , AVM ) :
self . _avm [ attr ] = val . terms [ 0 ]
elif isinstance ( val , AVM ) :
val . normalize ( )
|
def _initialize ( self , boto_session , sagemaker_client , sagemaker_runtime_client ) :
"""Initialize this SageMaker Session .
Creates or uses a boto _ session , sagemaker _ client and sagemaker _ runtime _ client .
Sets the region _ name ."""
|
self . boto_session = boto_session or boto3 . Session ( )
self . _region_name = self . boto_session . region_name
if self . _region_name is None :
raise ValueError ( 'Must setup local AWS configuration with a region supported by SageMaker.' )
self . sagemaker_client = sagemaker_client or self . boto_session . client ( 'sagemaker' )
prepend_user_agent ( self . sagemaker_client )
if sagemaker_runtime_client is not None :
self . sagemaker_runtime_client = sagemaker_runtime_client
else :
config = botocore . config . Config ( read_timeout = 80 )
self . sagemaker_runtime_client = self . boto_session . client ( 'runtime.sagemaker' , config = config )
prepend_user_agent ( self . sagemaker_runtime_client )
self . local_mode = False
|
def set_opt ( self , name , value ) :
"""Set option ."""
|
self . cache [ 'opts' ] [ name ] = value
if name == 'compress' :
self . cache [ 'delims' ] = self . def_delims if not value else ( '' , '' , '' )
|
def stop ( self , nowait = False ) :
"""Stop the listener .
This asks the thread to terminate , and then waits for it to do so .
Note that if you don ' t call this before your application exits , there
may be some records still left on the queue , which won ' t be processed .
If nowait is False then thread will handle remaining items in queue and
stop .
If nowait is True then thread will be stopped even if the queue still
contains items ."""
|
self . _stop . set ( )
if nowait :
self . _stop_nowait . set ( )
self . queue . put_nowait ( self . _sentinel_item )
if ( self . _thread . isAlive ( ) and self . _thread is not threading . currentThread ( ) ) :
self . _thread . join ( )
self . _thread = None
|
def is_length_prime(word):
    """Function to check if the length of a given string is a prime number.
    Examples:
    is_length_prime('Hello') == True
    is_length_prime('abcdcba') == True
    is_length_prime('kittens') == True
    is_length_prime('orange') == False
    :param word: A string which needs to be verified for length to be prime.
    :return: Returns True if the string's length is a prime number, False otherwise."""
    n = len(word)
    if n < 2:
        return False
    for i in range(2, n):
        if n % i == 0:
            return False
    return True
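# An equivalent variant (not part of the original source) that only trial-divides
# up to the integer square root, which matters for very long strings
# (math.isqrt requires Python 3.8+):
import math

def is_length_prime_fast(word):
    n = len(word)
    if n < 2:
        return False
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True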
|
def varnames(func):
    """Return tuple of positional and keyword argument names for a function,
    method, class or callable.
    In case of a class, its ``__init__`` method is considered.
    For methods the ``self`` parameter is not included."""
    cache = getattr(func, "__dict__", {})
    try:
        return cache["_varnames"]
    except KeyError:
        pass
    if inspect.isclass(func):
        try:
            func = func.__init__
        except AttributeError:
            return (), ()
    elif not inspect.isroutine(func):  # callable object?
        try:
            func = getattr(func, "__call__", func)
        except Exception:
            return ()
    try:  # func MUST be a function or method here or we won't parse any args
        spec = _getargspec(func)
    except TypeError:
        return (), ()
    args, defaults = tuple(spec.args), spec.defaults
    if defaults:
        index = -len(defaults)
        args, defaults = args[:index], tuple(args[index:])
    else:
        defaults = ()
    # strip any implicit instance arg
    # pypy3 uses "obj" instead of "self" for default dunder methods
    implicit_names = ("self",) if not _PYPY3 else ("self", "obj")
    if args:
        if inspect.ismethod(func) or ("." in getattr(func, "__qualname__", ()) and args[0] in implicit_names):
            args = args[1:]
    try:
        cache["_varnames"] = args, defaults
    except TypeError:
        pass
    return args, defaults
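# Usage sketch (_getargspec and _PYPY3 come from the surrounding module); note
# that the second tuple holds the *names* of defaulted arguments, not their values:
# def greet(greeting, name="world", punctuation="!"):
#     ...
# varnames(greet) -> (('greeting',), ('name', 'punctuation'))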
|
def from_dict ( data , ctx ) :
"""Instantiate a new AccountSummary from a dict ( generally from loading a
JSON response ) . The data used to instantiate the AccountSummary is a
shallow copy of the dict passed in , with any complex child types
instantiated appropriately ."""
|
data = data . copy ( )
if data . get ( 'balance' ) is not None :
data [ 'balance' ] = ctx . convert_decimal_number ( data . get ( 'balance' ) )
if data . get ( 'pl' ) is not None :
data [ 'pl' ] = ctx . convert_decimal_number ( data . get ( 'pl' ) )
if data . get ( 'resettablePL' ) is not None :
data [ 'resettablePL' ] = ctx . convert_decimal_number ( data . get ( 'resettablePL' ) )
if data . get ( 'financing' ) is not None :
data [ 'financing' ] = ctx . convert_decimal_number ( data . get ( 'financing' ) )
if data . get ( 'commission' ) is not None :
data [ 'commission' ] = ctx . convert_decimal_number ( data . get ( 'commission' ) )
if data . get ( 'guaranteedExecutionFees' ) is not None :
data [ 'guaranteedExecutionFees' ] = ctx . convert_decimal_number ( data . get ( 'guaranteedExecutionFees' ) )
if data . get ( 'marginRate' ) is not None :
data [ 'marginRate' ] = ctx . convert_decimal_number ( data . get ( 'marginRate' ) )
if data . get ( 'unrealizedPL' ) is not None :
data [ 'unrealizedPL' ] = ctx . convert_decimal_number ( data . get ( 'unrealizedPL' ) )
if data . get ( 'NAV' ) is not None :
data [ 'NAV' ] = ctx . convert_decimal_number ( data . get ( 'NAV' ) )
if data . get ( 'marginUsed' ) is not None :
data [ 'marginUsed' ] = ctx . convert_decimal_number ( data . get ( 'marginUsed' ) )
if data . get ( 'marginAvailable' ) is not None :
data [ 'marginAvailable' ] = ctx . convert_decimal_number ( data . get ( 'marginAvailable' ) )
if data . get ( 'positionValue' ) is not None :
data [ 'positionValue' ] = ctx . convert_decimal_number ( data . get ( 'positionValue' ) )
if data . get ( 'marginCloseoutUnrealizedPL' ) is not None :
data [ 'marginCloseoutUnrealizedPL' ] = ctx . convert_decimal_number ( data . get ( 'marginCloseoutUnrealizedPL' ) )
if data . get ( 'marginCloseoutNAV' ) is not None :
data [ 'marginCloseoutNAV' ] = ctx . convert_decimal_number ( data . get ( 'marginCloseoutNAV' ) )
if data . get ( 'marginCloseoutMarginUsed' ) is not None :
data [ 'marginCloseoutMarginUsed' ] = ctx . convert_decimal_number ( data . get ( 'marginCloseoutMarginUsed' ) )
if data . get ( 'marginCloseoutPercent' ) is not None :
data [ 'marginCloseoutPercent' ] = ctx . convert_decimal_number ( data . get ( 'marginCloseoutPercent' ) )
if data . get ( 'marginCloseoutPositionValue' ) is not None :
data [ 'marginCloseoutPositionValue' ] = ctx . convert_decimal_number ( data . get ( 'marginCloseoutPositionValue' ) )
if data . get ( 'withdrawalLimit' ) is not None :
data [ 'withdrawalLimit' ] = ctx . convert_decimal_number ( data . get ( 'withdrawalLimit' ) )
if data . get ( 'marginCallMarginUsed' ) is not None :
data [ 'marginCallMarginUsed' ] = ctx . convert_decimal_number ( data . get ( 'marginCallMarginUsed' ) )
if data . get ( 'marginCallPercent' ) is not None :
data [ 'marginCallPercent' ] = ctx . convert_decimal_number ( data . get ( 'marginCallPercent' ) )
return AccountSummary ( ** data )
|
def iter_groups ( self ) :
"""generator that yields channel groups as pandas DataFrames . If there
are multiple occurences for the same channel name inside a channel
group , then a counter will be used to make the names unique
( < original _ name > _ < counter > )"""
|
for i , _ in enumerate ( self . groups ) :
yield self . get_group ( i )
|
def send_confirmation_email ( self ) :
"""Sends an email to confirm the new email address .
This method sends out two emails . One to the new email address that
contains the ` ` email _ confirmation _ key ` ` which is used to verify this
this email address with : func : ` User . objects . confirm _ email ` .
The other email is to the old email address to let the user know that
a request is made to change this email address ."""
|
context = { 'user' : self , 'new_email' : self . email_unconfirmed , 'protocol' : get_protocol ( ) , 'confirmation_key' : self . email_confirmation_key , 'site' : Site . objects . get_current ( ) }
# Email to the old address
subject_old = '' . join ( render_to_string ( 'accounts/emails/confirmation_email_subject_old.txt' , context ) . splitlines ( ) )
message_old = render_to_string ( 'accounts/emails/confirmation_email_message_old.txt' , context )
send_mail ( subject_old , message_old , settings . DEFAULT_FROM_EMAIL , [ self . email ] )
# Email to the new address
subject_new = '' . join ( render_to_string ( 'accounts/emails/confirmation_email_subject_new.txt' , context ) . splitlines ( ) )
message_new = render_to_string ( 'accounts/emails/confirmation_email_message_new.txt' , context )
send_mail ( subject_new , message_new , settings . DEFAULT_FROM_EMAIL , [ self . email_unconfirmed , ] )
|
def get_token ( self , user_id , password , redirect_uri , scope = '/activities/update' ) :
"""Get the token .
Parameters
: param user _ id : string
The id of the user used for authentication .
: param password : string
The user password .
: param redirect _ uri : string
The redirect uri of the institution .
: param scope : string
The desired scope . For example ' / activities / update ' ,
' / read - limited ' , etc .
Returns
: returns : string
The token ."""
|
return super ( MemberAPI , self ) . get_token ( user_id , password , redirect_uri , scope )
|
def fraction_done(self, start=0.0, finish=1.0, stack=None):
    ''':return float: The estimated fraction of the overall task hierarchy
    that has been finished. A number in the range [0.0, 1.0].'''
    if stack is None:
        stack = self.task_stack
    if len(stack) == 0:
        return start
    elif stack[0].size == 0:  # Avoid divide by zero
        return finish
    else:
        top_fraction = stack[0].progress * 1.0 / stack[0].size
        next_top_fraction = (stack[0].progress + 1.0) / stack[0].size
        inner_start = start + top_fraction * (finish - start)
        inner_finish = start + next_top_fraction * (finish - start)
        return self.fraction_done(inner_start, inner_finish, stack[1:])
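# Worked example of the recursion: with a two-level task stack where the outer
# task is 2/4 done and the inner task is 1/2 done, the outer level maps the inner
# task onto [0.5, 0.75], so the overall estimate is 0.5 + 0.5 * (0.75 - 0.5) = 0.625.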
|
def _delete_record ( self , identifier = None , rtype = None , name = None , content = None ) :
"""Delete a record from the hosted zone ."""
|
return self . _change_record_sets ( 'DELETE' , rtype , name , content )
|
def import_ecdsakey_from_pem ( pem , scheme = 'ecdsa-sha2-nistp256' ) :
"""< Purpose >
Import either a public or private ECDSA PEM . In contrast to the other
explicit import functions ( import _ ecdsakey _ from _ public _ pem and
import _ ecdsakey _ from _ private _ pem ) , this function is useful for when it is
not known whether ' pem ' is private or public .
< Arguments >
pem :
A string in PEM format .
scheme :
The signature scheme used by the imported key .
< Exceptions >
securesystemslib . exceptions . FormatError , if ' pem ' is improperly formatted .
< Side Effects >
None .
< Returns >
A dictionary containing the ECDSA keys and other identifying information .
Conforms to ' securesystemslib . formats . ECDSAKEY _ SCHEMA ' ."""
|
# Does ' pem ' have the correct format ?
# This check will ensure arguments has the appropriate number
# of objects and object types , and that all dict keys are properly named .
# Raise ' securesystemslib . exceptions . FormatError ' if the check fails .
securesystemslib . formats . PEMECDSA_SCHEMA . check_match ( pem )
# Is ' scheme ' properly formatted ?
securesystemslib . formats . ECDSA_SCHEME_SCHEMA . check_match ( scheme )
public_pem = ''
private_pem = ''
# Ensure the PEM string has a public or private header and footer . Although
# a simple validation of ' pem ' is performed here , a fully valid PEM string is
# needed later to successfully verify signatures . Performing stricter
# validation of PEMs are left to the external libraries that use ' pem ' .
if is_pem_public ( pem ) :
public_pem = extract_pem ( pem , private_pem = False )
elif is_pem_private ( pem , 'ec' ) : # Return an ecdsakey object ( ECDSAKEY _ SCHEMA ) with the private key included .
return import_ecdsakey_from_private_pem ( pem , password = None )
else :
raise securesystemslib . exceptions . FormatError ( 'PEM contains neither a public' ' nor private key: ' + repr ( pem ) )
# Begin building the ECDSA key dictionary .
ecdsakey_dict = { }
keytype = 'ecdsa-sha2-nistp256'
# Generate the keyid of the ECDSA key . ' key _ value ' corresponds to the
# ' keyval ' entry of the ' ECDSAKEY _ SCHEMA ' dictionary . The private key
# information is not included in the generation of the ' keyid ' identifier .
# If a PEM is found to contain a private key , the generated rsakey object
# should be returned above . The following key object is for the case of a
# PEM with only a public key . Convert any ' \ r \ n ' ( e . g . , Windows ) newline
# characters to ' \ n ' so that a consistent keyid is generated .
key_value = { 'public' : public_pem . replace ( '\r\n' , '\n' ) , 'private' : '' }
keyid = _get_keyid ( keytype , scheme , key_value )
ecdsakey_dict [ 'keytype' ] = keytype
ecdsakey_dict [ 'scheme' ] = scheme
ecdsakey_dict [ 'keyid' ] = keyid
ecdsakey_dict [ 'keyval' ] = key_value
return ecdsakey_dict
|
def abspath(myPath):
    """Get absolute path to resource, works for dev and for PyInstaller"""
    import os
    import sys
    try:  # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
        return os.path.join(base_path, os.path.basename(myPath))
    except Exception:
        base_path = os.path.abspath(os.path.dirname(__file__))
        return os.path.join(base_path, myPath)
|
def op ( name , data , bucket_count = None , display_name = None , description = None , collections = None ) :
"""Create a legacy histogram summary op .
Arguments :
name : A unique name for the generated summary node .
data : A ` Tensor ` of any shape . Must be castable to ` float64 ` .
bucket _ count : Optional positive ` int ` . The output will have this
many buckets , except in two edge cases . If there is no data , then
there are no buckets . If there is data but all points have the
same value , then there is one bucket whose left and right
endpoints are the same .
display _ name : Optional name for this summary in TensorBoard , as a
constant ` str ` . Defaults to ` name ` .
description : Optional long - form description for this summary , as a
constant ` str ` . Markdown is supported . Defaults to empty .
collections : Optional list of graph collections keys . The new
summary op is added to these collections . Defaults to
` [ GraphKeys . SUMMARIES ] ` .
Returns :
A TensorFlow summary op ."""
|
# TODO ( nickfelt ) : remove on - demand imports once dep situation is fixed .
import tensorflow . compat . v1 as tf
if display_name is None :
display_name = name
summary_metadata = metadata . create_summary_metadata ( display_name = display_name , description = description )
with tf . name_scope ( name ) :
tensor = _buckets ( data , bucket_count = bucket_count )
return tf . summary . tensor_summary ( name = 'histogram_summary' , tensor = tensor , collections = collections , summary_metadata = summary_metadata )
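
A hedged usage sketch under TF1-style graph execution, assuming the surrounding TensorBoard helpers (metadata, _buckets) are importable as in the module above; the log directory and input tensor are placeholders.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

data = tf.random.normal([1000])
summ = op("weights_histogram", data, bucket_count=30,
          display_name="Layer 1 weights", description="Weight distribution.")

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/histogram_demo")   # placeholder logdir
    writer.add_summary(sess.run(summ), global_step=0)
    writer.close()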
|
def setConfigKey ( key , value ) :
"""Sets the config data value for the specified dictionary key"""
|
configFile = ConfigurationManager . _configFile ( )
return JsonDataManager ( configFile ) . setKey ( key , value )
|
def start ( self ) :
"""Start the sensor"""
|
# open device
openni2 . initialize ( PrimesenseSensor . OPENNI2_PATH )
self . _device = openni2 . Device . open_any ( )
# open depth stream
self . _depth_stream = self . _device . create_depth_stream ( )
self . _depth_stream . configure_mode ( PrimesenseSensor . DEPTH_IM_WIDTH , PrimesenseSensor . DEPTH_IM_HEIGHT , PrimesenseSensor . FPS , openni2 . PIXEL_FORMAT_DEPTH_1_MM )
self . _depth_stream . start ( )
# open color stream
self . _color_stream = self . _device . create_color_stream ( )
self . _color_stream . configure_mode ( PrimesenseSensor . COLOR_IM_WIDTH , PrimesenseSensor . COLOR_IM_HEIGHT , PrimesenseSensor . FPS , openni2 . PIXEL_FORMAT_RGB888 )
self . _color_stream . camera . set_auto_white_balance ( self . _auto_white_balance )
self . _color_stream . camera . set_auto_exposure ( self . _auto_exposure )
self . _color_stream . start ( )
# configure device
if self . _registration_mode == PrimesenseRegistrationMode . DEPTH_TO_COLOR :
self . _device . set_image_registration_mode ( openni2 . IMAGE_REGISTRATION_DEPTH_TO_COLOR )
else :
self . _device . set_image_registration_mode ( openni2 . IMAGE_REGISTRATION_OFF )
self . _device . set_depth_color_sync_enabled ( self . _enable_depth_color_sync )
self . _running = True
|
def write ( self , content ) :
"""Save content on disk"""
|
with io . open ( self . target , 'w' , encoding = 'utf-8' ) as fp :
fp . write ( content )
if not content . endswith ( u'\n' ) :
fp . write ( u'\n' )
|
def request ( self , host , handler , request_body , verbose ) :
"""Make an xmlrpc request ."""
|
headers = { 'User-Agent' : self . user_agent , # 'Proxy-Connection' : 'Keep-Alive' ,
# 'Content-Range' : 'bytes oxy1.0/-1' ,
'Accept' : 'text/xml' , 'Content-Type' : 'text/xml' }
url = self . _build_url ( host , handler )
try :
resp = requests . post ( url , data = request_body , headers = headers )
except ValueError :
raise
except Exception :
raise
# something went wrong
else :
try :
resp . raise_for_status ( )
except requests . RequestException as e :
raise xmlrpc . ProtocolError ( url , resp . status_code , str ( e ) , resp . headers )
else :
return self . parse_response ( resp )
|
def set_webhook_handler ( self , scope , callback ) :
"""Allows adding a webhook _ handler as an alternative to the decorators"""
|
scope = scope . lower ( )
if scope == 'after_send' :
self . _after_send = callback
return
if scope not in Page . WEBHOOK_ENDPOINTS :
raise ValueError ( "The 'scope' argument must be one of {}." . format ( Page . WEBHOOK_ENDPOINTS ) )
self . _webhook_handlers [ scope ] = callback
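
A hypothetical usage sketch, assuming a Messenger-style Page object (as in fbmq) where 'message' is one of Page.WEBHOOK_ENDPOINTS; the access token and the callback signatures are illustrative only.

def on_message(event):
    print("received webhook event:", event)

page = Page("<PAGE_ACCESS_TOKEN>")              # placeholder token
page.set_webhook_handler("message", on_message)
page.set_webhook_handler("after_send", lambda payload, response: print("message sent"))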
|
def clear_provider ( self ) :
"""Removes the provider .
raise : NoAccess - ` ` Metadata . isRequired ( ) ` ` is ` ` true ` ` or
` ` Metadata . isReadOnly ( ) ` ` is ` ` true ` `
* compliance : mandatory - - This method must be implemented . *"""
|
if ( self . get_provider_metadata ( ) . is_read_only ( ) or self . get_provider_metadata ( ) . is_required ( ) ) :
raise errors . NoAccess ( )
self . _my_map [ 'providerId' ] = self . _provider_default
|
def display_url ( target ) :
"""Displaying URL in an IPython notebook to allow the user to click and check on information . With thanks to Fernando Perez for putting together the implementation !
: param target : the url to display .
: type target : string ."""
|
prefix = u"http://" if not target . startswith ( "http" ) else u""
target = prefix + target
display ( HTML ( u'<a href="{t}" target=_blank>{t}</a>' . format ( t = target ) ) )
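
Minimal usage sketch inside a notebook cell; the URLs are placeholders.

display_url("example.org/docs")      # "http://" prefix is added automatically
display_url("https://example.org")   # already-prefixed URLs pass through unchanged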
|
def get_readonly_fields ( self , request , obj = None ) :
"""Set all fields readonly ."""
|
return list ( self . readonly_fields ) + [ field . name for field in obj . _meta . fields ]
|
def remove_edge ( self , p_from , p_to , p_remove_unconnected_nodes = True ) :
"""Removes an edge from the graph .
When remove _ unconnected _ nodes is True , then the nodes are also removed
if they become isolated ."""
|
if self . has_edge ( p_from , p_to ) :
self . _edges [ p_from ] . remove ( p_to )
try :
del self . _edge_numbers [ ( p_from , p_to ) ]
except KeyError :
return None
if p_remove_unconnected_nodes :
if self . is_isolated ( p_from ) :
self . remove_node ( p_from )
if self . is_isolated ( p_to ) :
self . remove_node ( p_to )
|
def get_batch_children ( self ) :
"""Retrieves batch child tasks for this task if its a batch task .
: return : Collection instance .
: raises SbError if task is not a batch task ."""
|
if not self . batch :
raise SbgError ( "This task is not a batch task." )
return self . query ( parent = self . id , api = self . _api )
|
def get_ephemerides ( self , observatory_code , airmass_lessthan = 99 , solar_elongation = ( 0 , 180 ) , skip_daylight = False ) :
"""Call JPL HORIZONS website to obtain ephemerides based on the
provided targetname , epochs , and observatory _ code . For a list
of valid observatory codes , refer to
http : / / minorplanetcenter . net / iau / lists / ObsCodesF . html
: param observatory _ code : str / int ;
observer ' s location code according to Minor Planet Center
: param airmass _ lessthan : float ;
maximum airmass ( optional , default : 99)
: param solar _ elongation : tuple ;
permissible solar elongation range ( optional , deg )
: param skip _ daylight : boolean ;
crop daylight epoch during query ( optional )
: result : int ; number of epochs queried
: example : > > > ceres = callhorizons . query ( ' Ceres ' )
> > > ceres . set _ epochrange ( ' 2016-02-23 00:00 ' , ' 2016-02-24 00:00 ' , ' 1h ' )
> > > print ( ceres . get _ ephemerides ( 568 ) , ' epochs queried ' )
The queried properties and their definitions are :
| Property | Definition |
| targetname | official number , name , designation [ string ] |
| H | absolute magnitude in V band ( float , mag ) |
| G | photometric slope parameter ( float ) |
| datetime | epoch date and time ( str , YYYY - MM - DD HH : MM : SS ) |
| datetime _ jd | epoch Julian Date ( float ) |
| solar _ presence | information on Sun ' s presence ( str ) |
| lunar _ presence | information on Moon ' s presence ( str ) |
| RA | target RA ( float , J2000.0 ) |
| DEC | target DEC ( float , J2000.0 ) |
| RA _ rate | target rate RA * cos ( DEC ) ( float , arcsec / s ) |
| DEC _ rate | target rate DEC ( float , arcsec / s ) |
| AZ | Azimuth meas East ( 90 ) of North ( 0 ) ( float , deg ) |
| EL | Elevation ( float , deg ) |
| airmass | target optical airmass ( float ) |
| magextinct | V - mag extinction due airmass ( float , mag ) |
| V | V magnitude ( comets : total mag ) ( float , mag ) |
| illumination | fraction of illuminated disk ( float ) |
| EclLon | heliocentr . ecl . long . ( float , deg , J2000.0 ) |
| EclLat | heliocentr . ecl . lat . ( float , deg , J2000.0 ) |
| ObsEclLon | obscentr . ecl . long . ( float , deg , J2000.0 ) |
| ObsEclLat | obscentr . ecl . lat . ( float , deg , J2000.0 ) |
| r | heliocentric distance ( float , au ) |
| r _ rate | heliocentric radial rate ( float , km / s ) |
| delta | distance from the observer ( float , au ) |
| delta _ rate | obs - centric radial rate ( float , km / s ) |
| lighttime | one - way light time ( float , s ) |
| elong | solar elongation ( float , deg ) |
| elongFlag | app . position relative to Sun ( str ) |
| alpha | solar phase angle ( float , deg ) |
| sunTargetPA | PA of Sun - > target vector ( float , deg , EoN ) |
| velocityPA | PA of velocity vector ( float , deg , EoN ) |
| GlxLon | galactic longitude ( float , deg ) |
| GlxLat | galactic latitude ( float , deg ) |
| RA _ 3sigma | 3sigma pos . unc . in RA ( float , arcsec ) |
| DEC _ 3sigma | 3sigma pos . unc . in DEC ( float , arcsec ) |"""
|
# queried fields ( see HORIZONS website for details )
# if fields are added here , also update the field identification below
quantities = '1,3,4,8,9,10,18,19,20,21,23,24,27,31,33,36'
# encode objectname for use in URL
objectname = urllib . quote ( self . targetname . encode ( "utf8" ) )
# construct URL for HORIZONS query
url = "https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l" + "&TABLE_TYPE='OBSERVER'" + "&QUANTITIES='" + str ( quantities ) + "'" + "&CSV_FORMAT='YES'" + "&ANG_FORMAT='DEG'" + "&CAL_FORMAT='BOTH'" + "&SOLAR_ELONG='" + str ( solar_elongation [ 0 ] ) + "," + str ( solar_elongation [ 1 ] ) + "'" + "&CENTER='" + str ( observatory_code ) + "'"
if self . not_smallbody :
url += "&COMMAND='" + urllib . quote ( self . targetname . encode ( "utf8" ) ) + "'"
elif self . cap and self . comet :
for ident in self . parse_comet ( ) :
if ident is not None :
break
if ident is None :
ident = self . targetname
url += "&COMMAND='DES=" + urllib . quote ( ident . encode ( "utf8" ) ) + "%3B" + ( "CAP'" if self . cap else "'" )
elif self . isorbit_record ( ) : # Comet orbit record . Do not use DES , CAP . This test must
# occur before asteroid test .
url += "&COMMAND='" + urllib . quote ( self . targetname . encode ( "utf8" ) ) + "%3B'"
elif self . isasteroid ( ) and not self . comet : # for asteroids , use ' DES = " designation " ; '
for ident in self . parse_asteroid ( ) :
if ident is not None :
break
if ident is None :
ident = self . targetname
url += "&COMMAND='" + urllib . quote ( str ( ident ) . encode ( "utf8" ) ) + "%3B'"
elif self . iscomet ( ) and not self . asteroid : # for comets , potentially append the current apparition
# ( CAP ) parameter , or the fragmentation flag ( NOFRAG )
for ident in self . parse_comet ( ) :
if ident is not None :
break
if ident is None :
ident = self . targetname
url += "&COMMAND='DES=" + urllib . quote ( ident . encode ( "utf8" ) ) + "%3B" + ( "NOFRAG%3B" if self . nofrag else "" ) + ( "CAP'" if self . cap else "'" )
# elif ( not self . targetname . replace ( ' ' , ' ' ) . isalpha ( ) and not
# self . targetname . isdigit ( ) and not
# self . targetname . islower ( ) and not
# self . targetname . isupper ( ) ) :
# # lower case + upper case + numbers = pot . case sensitive designation
# url + = " & COMMAND = ' DES = " + \
# urllib . quote ( self . targetname . encode ( " utf8 " ) ) + " % 3B ' "
else :
url += "&COMMAND='" + urllib . quote ( self . targetname . encode ( "utf8" ) ) + "%3B'"
if self . discreteepochs is not None :
url += "&TLIST="
for date in self . discreteepochs :
url += "'" + str ( date ) + "'"
elif ( self . start_epoch is not None and self . stop_epoch is not None and self . step_size is not None ) :
url += "&START_TIME='" + urllib . quote ( self . start_epoch . encode ( "utf8" ) ) + "'" + "&STOP_TIME='" + urllib . quote ( self . stop_epoch . encode ( "utf8" ) ) + "'" + "&STEP_SIZE='" + str ( self . step_size ) + "'"
else :
raise IOError ( 'no epoch information given' )
if airmass_lessthan < 99 :
url += "&AIRMASS='" + str ( airmass_lessthan ) + "'"
if skip_daylight :
url += "&SKIP_DAYLT='YES'"
else :
url += "&SKIP_DAYLT='NO'"
self . url = url
# print ( url )
# call HORIZONS
i = 0
# count number of connection tries
while True :
try :
src = urllib . urlopen ( url ) . readlines ( )
break
except urllib . URLError :
time . sleep ( 0.1 )
# in case the HORIZONS website is blocked ( due to another query )
# wait 0.1 second and try again
i += 1
if i > 50 :
return 0
# website could not be reached
# disseminate website source code
# identify header line and extract data block ( ephemerides data )
# also extract targetname , absolute mag . ( H ) , and slope parameter ( G )
headerline = [ ]
datablock = [ ]
in_datablock = False
H , G = np . nan , np . nan
for idx , line in enumerate ( src ) :
line = line . decode ( 'UTF-8' )
if "Date__(UT)__HR:MN" in line :
headerline = line . split ( ',' )
if "$$EOE\n" in line :
in_datablock = False
if in_datablock :
datablock . append ( line )
if "$$SOE\n" in line :
in_datablock = True
if "Target body name" in line :
targetname = line [ 18 : 50 ] . strip ( )
if ( "rotational period in hours)" in src [ idx ] . decode ( 'UTF-8' ) ) :
HGline = src [ idx + 2 ] . decode ( 'UTF-8' ) . split ( '=' )
if 'B-V' in HGline [ 2 ] and 'G' in HGline [ 1 ] :
try :
H = float ( HGline [ 1 ] . rstrip ( 'G' ) )
except ValueError :
pass
try :
G = float ( HGline [ 2 ] . rstrip ( 'B-V' ) )
except ValueError :
pass
if ( "Multiple major-bodies match string" in src [ idx ] . decode ( 'UTF-8' ) or ( "Matching small-bodies" in src [ idx ] . decode ( 'UTF-8' ) and not "No matches found" in src [ idx + 1 ] . decode ( 'UTF-8' ) ) ) :
raise ValueError ( 'Ambiguous target name; check URL: %s' % url )
if ( "Matching small-bodies" in src [ idx ] . decode ( 'UTF-8' ) and "No matches found" in src [ idx + 1 ] . decode ( 'UTF-8' ) ) :
raise ValueError ( 'Unknown target; check URL: %s' % url )
# field identification for each line
ephemerides = [ ]
for line in datablock :
line = line . split ( ',' )
# ignore lines that don ' t hold any data
if len ( line ) < len ( quantities . split ( ',' ) ) :
continue
this_eph = [ ]
fieldnames = [ ]
datatypes = [ ]
# create a dictionary for each date ( each line )
for idx , item in enumerate ( headerline ) :
if ( 'Date__(UT)__HR:MN' in item ) :
this_eph . append ( line [ idx ] . strip ( ) )
fieldnames . append ( 'datetime' )
datatypes . append ( object )
if ( 'Date_________JDUT' in item ) :
this_eph . append ( np . float64 ( line [ idx ] ) )
fieldnames . append ( 'datetime_jd' )
datatypes . append ( np . float64 )
# read out and convert solar presence
try :
this_eph . append ( { '*' : 'daylight' , 'C' : 'civil twilight' , 'N' : 'nautical twilight' , 'A' : 'astronomical twilight' , ' ' : 'dark' , 't' : 'transiting' } [ line [ idx + 1 ] ] )
except KeyError :
this_eph . append ( 'n.a.' )
fieldnames . append ( 'solar_presence' )
datatypes . append ( object )
# read out and convert lunar presence
try :
this_eph . append ( { 'm' : 'moonlight' , ' ' : 'dark' } [ line [ idx + 2 ] ] )
except KeyError :
this_eph . append ( 'n.a.' )
fieldnames . append ( 'lunar_presence' )
datatypes . append ( object )
if ( item . find ( 'R.A._(ICRF/J2000.0)' ) > - 1 ) :
this_eph . append ( np . float64 ( line [ idx ] ) )
fieldnames . append ( 'RA' )
datatypes . append ( np . float64 )
if ( item . find ( 'DEC_(ICRF/J2000.0)' ) > - 1 ) :
this_eph . append ( np . float64 ( line [ idx ] ) )
fieldnames . append ( 'DEC' )
datatypes . append ( np . float64 )
if ( item . find ( 'dRA*cosD' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) / 3600. )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'RA_rate' )
datatypes . append ( np . float64 )
if ( item . find ( 'd(DEC)/dt' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) / 3600. )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'DEC_rate' )
datatypes . append ( np . float64 )
if ( item . find ( 'Azi_(a-app)' ) > - 1 ) :
try : # if AZ not given , e . g . for space telescopes
this_eph . append ( np . float64 ( line [ idx ] ) )
fieldnames . append ( 'AZ' )
datatypes . append ( np . float64 )
except ValueError :
pass
if ( item . find ( 'Elev_(a-app)' ) > - 1 ) :
try : # if EL not given , e . g . for space telescopes
this_eph . append ( np . float64 ( line [ idx ] ) )
fieldnames . append ( 'EL' )
datatypes . append ( np . float64 )
except ValueError :
pass
if ( item . find ( 'a-mass' ) > - 1 ) :
try : # if airmass not given , e . g . for space telescopes
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'airmass' )
datatypes . append ( np . float64 )
if ( item . find ( 'mag_ex' ) > - 1 ) :
try : # if mag _ ex not given , e . g . for space telescopes
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'magextinct' )
datatypes . append ( np . float64 )
if ( item . find ( 'APmag' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'V' )
datatypes . append ( np . float64 )
if ( item . find ( 'Illu%' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'illumination' )
datatypes . append ( np . float64 )
if ( item . find ( 'hEcl-Lon' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'EclLon' )
datatypes . append ( np . float64 )
if ( item . find ( 'hEcl-Lat' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'EclLat' )
datatypes . append ( np . float64 )
if ( item . find ( 'ObsEcLon' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'ObsEclLon' )
datatypes . append ( np . float64 )
if ( item . find ( 'ObsEcLat' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'ObsEclLat' )
datatypes . append ( np . float64 )
if ( item . find ( ' r' ) > - 1 ) and ( headerline [ idx + 1 ] . find ( "rdot" ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'r' )
datatypes . append ( np . float64 )
if ( item . find ( 'rdot' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'r_rate' )
datatypes . append ( np . float64 )
if ( item . find ( 'delta' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'delta' )
datatypes . append ( np . float64 )
if ( item . find ( 'deldot' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'delta_rate' )
datatypes . append ( np . float64 )
if ( item . find ( '1-way_LT' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) * 60. )
# seconds
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'lighttime' )
datatypes . append ( np . float64 )
if ( item . find ( 'S-O-T' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'elong' )
datatypes . append ( np . float64 )
# in the case of space telescopes , ' / r S - T - O ' is used ;
# ground - based telescopes have both parameters in separate
# columns
if ( item . find ( '/r S-T-O' ) > - 1 ) :
this_eph . append ( { '/L' : 'leading' , '/T' : 'trailing' } [ line [ idx ] . split ( ) [ 0 ] ] )
fieldnames . append ( 'elongFlag' )
datatypes . append ( object )
try :
this_eph . append ( np . float64 ( line [ idx ] . split ( ) [ 1 ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'alpha' )
datatypes . append ( np . float64 )
elif ( item . find ( 'S-T-O' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'alpha' )
datatypes . append ( np . float64 )
elif ( item . find ( '/r' ) > - 1 ) :
this_eph . append ( { '/L' : 'leading' , '/T' : 'trailing' , '/?' : 'not defined' } [ line [ idx ] ] )
fieldnames . append ( 'elongFlag' )
datatypes . append ( object )
if ( item . find ( 'PsAng' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'sunTargetPA' )
datatypes . append ( np . float64 )
if ( item . find ( 'PsAMV' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'velocityPA' )
datatypes . append ( np . float64 )
if ( item . find ( 'GlxLon' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'GlxLon' )
datatypes . append ( np . float64 )
if ( item . find ( 'GlxLat' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'GlxLat' )
datatypes . append ( np . float64 )
if ( item . find ( 'RA_3sigma' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'RA_3sigma' )
datatypes . append ( np . float64 )
if ( item . find ( 'DEC_3sigma' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'DEC_3sigma' )
datatypes . append ( np . float64 )
# in the case of a comet , use total mag for V
if ( item . find ( 'T-mag' ) > - 1 ) :
try :
this_eph . append ( np . float64 ( line [ idx ] ) )
except ValueError :
this_eph . append ( np . nan )
fieldnames . append ( 'V' )
datatypes . append ( np . float64 )
# append target name
this_eph . append ( targetname )
fieldnames . append ( 'targetname' )
datatypes . append ( object )
# append H
this_eph . append ( H )
fieldnames . append ( 'H' )
datatypes . append ( np . float64 )
# append G
this_eph . append ( G )
fieldnames . append ( 'G' )
datatypes . append ( np . float64 )
if len ( this_eph ) > 0 :
ephemerides . append ( tuple ( this_eph ) )
if len ( ephemerides ) == 0 :
return 0
# combine ephemerides with column names and data types into ndarray
assert len ( ephemerides [ 0 ] ) == len ( fieldnames ) == len ( datatypes )
self . data = np . array ( ephemerides , dtype = [ ( str ( fieldnames [ i ] ) , datatypes [ i ] ) for i in range ( len ( fieldnames ) ) ] )
return len ( self )
|
def _get_point_rates ( self , source , mmin , mmax = np . inf ) :
"""Adds the rates for a point source
: param source :
Point source as instance of : class :
openquake . hazardlib . source . point . PointSource
: param float mmin :
Minimum Magnitude
: param float mmax :
Maximum Magnitude"""
|
src_mesh = Mesh . from_points_list ( [ source . location ] )
in_poly = self . limits . intersects ( src_mesh ) [ 0 ]
if not in_poly :
return
else :
for ( mag , rate ) in source . get_annual_occurrence_rates ( ) :
if ( mag < mmin ) or ( mag > mmax ) :
return
else :
for ( prob , depth ) in source . hypocenter_distribution . data :
if ( depth < self . upper_depth ) or ( depth > self . lower_depth ) :
continue
else :
self . rates += ( prob * rate )
|
def _authenticate_websocket ( self , websocket , event_handler ) :
"""Sends a authentication challenge over a websocket .
This is not needed when we just send the cookie we got on login
when connecting to the websocket ."""
|
log . debug ( 'Authenticating websocket' )
json_data = json . dumps ( { "seq" : 1 , "action" : "authentication_challenge" , "data" : { "token" : self . _token } } ) . encode ( 'utf8' )
yield from websocket . send ( json_data )
while True :
message = yield from websocket . recv ( )
status = json . loads ( message )
log . debug ( status )
# We want to pass the events to the event _ handler already
# because the hello event could arrive before the authentication ok response
yield from event_handler ( message )
if ( 'status' in status and status [ 'status' ] == 'OK' ) and ( 'seq_reply' in status and status [ 'seq_reply' ] == 1 ) :
log . info ( 'Websocket authentication OK' )
return True
elif 'seq_reply' in status and status [ 'seq_reply' ] == 1 :
log . error ( 'Websocket authentication failed' )
|
def prepare ( self ) :
"""Prepare the session , setting up the session object and loading in
the values , assigning the IP address to the session if it ' s a new one ."""
|
super ( SessionRequestHandler , self ) . prepare ( )
result = yield gen . Task ( self . start_session )
LOGGER . debug ( 'Exiting SessionRequestHandler.prepare: %r' , result )
|
def calc_qv_v1 ( self ) :
"""Calculate the discharge of both forelands after Manning - Strickler .
Required control parameters :
| EKV |
| SKV |
| Gef |
Required flux sequence :
| AV |
| UV |
Calculated flux sequence :
| lstream _ fluxes . QV |
Examples :
For appropriate strictly positive values :
> > > from hydpy . models . lstream import *
> > > parameterstep ( )
> > > ekv ( 2.0)
> > > skv ( 50.0)
> > > gef ( 0.01)
> > > fluxes . av = 3.0
> > > fluxes . uv = 7.0
> > > model . calc _ qv _ v1 ( )
> > > fluxes . qv
qv ( 17.053102 , 17.053102)
For zero or negative values of the wetted cross - section area or
the wetted perimeter :
> > > fluxes . av = - 1.0 , 3.0
> > > fluxes . uv = 7.0 , 0.0
> > > model . calc _ qv _ v1 ( )
> > > fluxes . qv
qv ( 0.0 , 0.0)"""
|
con = self . parameters . control . fastaccess
flu = self . sequences . fluxes . fastaccess
for i in range ( 2 ) :
if ( flu . av [ i ] > 0. ) and ( flu . uv [ i ] > 0. ) :
flu . qv [ i ] = ( con . ekv [ i ] * con . skv [ i ] * flu . av [ i ] ** ( 5. / 3. ) / flu . uv [ i ] ** ( 2. / 3. ) * con . gef ** .5 )
else :
flu . qv [ i ] = 0.
|
def _update_roster ( self ) :
'''Update default flat roster with the passed in information .
: return :'''
|
roster_file = self . _get_roster ( )
if os . access ( roster_file , os . W_OK ) :
if self . __parsed_rosters [ self . ROSTER_UPDATE_FLAG ] :
with salt . utils . files . fopen ( roster_file , 'a' ) as roster_fp :
roster_fp . write ( '# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n host: ' '{hostname}\n user: {user}' '\n passwd: {passwd}\n' . format ( s_user = getpass . getuser ( ) , s_time = datetime . datetime . utcnow ( ) . isoformat ( ) , hostname = self . opts . get ( 'tgt' , '' ) , user = self . opts . get ( 'ssh_user' , '' ) , passwd = self . opts . get ( 'ssh_passwd' , '' ) ) )
log . info ( 'The host {0} has been added to the roster {1}' . format ( self . opts . get ( 'tgt' , '' ) , roster_file ) )
else :
log . error ( 'Unable to update roster {0}: access denied' . format ( roster_file ) )
|
def validate_overlap ( comp1 , comp2 , force ) :
"""Validate the overlap between the wavelength sets
of the two given components .
Parameters
comp1 , comp2 : ` ~ pysynphot . spectrum . SourceSpectrum ` or ` ~ pysynphot . spectrum . SpectralElement `
Source spectrum and bandpass of an observation .
force : { ' extrap ' , ' taper ' , ` None ` }
If not ` None ` , the components may be adjusted by
extrapolation or tapering .
Returns
comp1 , comp2
Same as inputs . However , ` ` comp1 ` ` might be tapered
if that option is selected .
warnings : dict
Maps warning keyword to its description .
Raises
KeyError
Invalid ` ` force ` ` .
pysynphot . exceptions . DisjointError
No overlap detected when ` ` force ` ` is ` None ` .
pysynphot . exceptions . PartialOverlap
Partial overlap detected when ` ` force ` ` is ` None ` ."""
|
warnings = dict ( )
if force is None :
stat = comp2 . check_overlap ( comp1 )
if stat == 'full' :
pass
elif stat == 'partial' :
raise ( exceptions . PartialOverlap ( 'Spectrum and bandpass do not fully overlap. You may use force=[extrap|taper] to force this Observation anyway.' ) )
elif stat == 'none' :
raise ( exceptions . DisjointError ( 'Spectrum and bandpass are disjoint' ) )
elif force . lower ( ) == 'taper' :
try :
comp1 = comp1 . taper ( )
except AttributeError :
comp1 = comp1 . tabulate ( ) . taper ( )
warnings [ 'PartialOverlap' ] = force
elif force . lower ( ) . startswith ( 'extrap' ) : # default behavior works , but check the overlap so we can set the warning
stat = comp2 . check_overlap ( comp1 )
if stat == 'partial' :
warnings [ 'PartialOverlap' ] = force
else :
raise ( KeyError ( "Illegal value force=%s; legal values=('taper','extrap')" % force ) )
return comp1 , comp2 , warnings
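
A hypothetical usage sketch with pysynphot objects; it assumes the pysynphot reference data (PYSYN_CDBS) is installed so that ObsBandpass can be built, and the chosen blackbody/bandpass pair is just a convenient, fully overlapping example.

import pysynphot as S

sp = S.BlackBody(5500)              # source spectrum
bp = S.ObsBandpass('johnson,v')     # bandpass

sp, bp, warns = validate_overlap(sp, bp, force=None)
print(warns)                        # expected to be empty for a full overlap

# Force a taper instead of raising on partial overlap:
sp2, bp2, warns2 = validate_overlap(sp, bp, force='taper')
print(warns2)                       # contains {'PartialOverlap': 'taper'} per the branch above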
|
def samples_to_batches ( samples : Iterable , batch_size : int ) :
"""Chunk a series of network inputs and outputs into larger batches"""
|
it = iter ( samples )
while True :
    batch_in , batch_out = [ ] , [ ]
    # collect up to batch_size samples ; the suppress ( ) block simply stops
    # collecting once the underlying iterator is exhausted
    with suppress ( StopIteration ) :
        for i in range ( batch_size ) :
            sample_in , sample_out = next ( it )
            batch_in . append ( sample_in )
            batch_out . append ( sample_out )
    if not batch_in :
        # nothing left : end the generator with return rather than raising
        # StopIteration , which would be suppressed above or rejected by PEP 479
        return
    # yield the batch ( the final one may be smaller than batch_size )
    yield np . array ( batch_in ) , np . array ( batch_out )
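
A usage sketch with toy (input, output) pairs; with the termination handling above, a smaller final batch is yielded before the generator stops.

import numpy as np

samples = ((np.full(3, i), np.array([i % 2])) for i in range(10))
for batch_in, batch_out in samples_to_batches(samples, batch_size=4):
    print(batch_in.shape, batch_out.shape)   # (4, 3) (4, 1) twice, then (2, 3) (2, 1)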
|
def decode_unicode_obj ( obj ) :
"""Decode unicoded dict / list / tuple encoded by ` unicode _ obj `"""
|
if isinstance ( obj , dict ) :
r = { }
for k , v in iteritems ( obj ) :
r [ decode_unicode_string ( k ) ] = decode_unicode_obj ( v )
return r
elif isinstance ( obj , six . string_types ) :
return decode_unicode_string ( obj )
elif isinstance ( obj , ( list , tuple ) ) :
return [ decode_unicode_obj ( x ) for x in obj ]
else :
return obj
|
async def close_async ( self ) :
"""Close the client asynchronously . This includes closing the Session
and CBS authentication layer as well as the Connection .
If the client was opened using an external Connection ,
this will be left intact ."""
|
if self . message_handler :
await self . message_handler . destroy_async ( )
self . message_handler = None
self . _shutdown = True
if self . _keep_alive_thread :
await self . _keep_alive_thread
self . _keep_alive_thread = None
if not self . _session :
return
# already closed .
if not self . _connection . cbs :
_logger . info ( "Closing non-CBS session." )
await asyncio . shield ( self . _session . destroy_async ( ) )
else :
_logger . info ( "CBS session pending %r." , self . _connection . container_id )
self . _session = None
if not self . _ext_connection :
_logger . info ( "Closing exclusive connection %r." , self . _connection . container_id )
await asyncio . shield ( self . _connection . destroy_async ( ) )
else :
_logger . info ( "Shared connection remaining open." )
self . _connection = None
|
def _handle_offset_error ( self , failure ) :
"""Retry the offset fetch request if appropriate .
Once the : attr : ` . retry _ delay ` reaches our : attr : ` . retry _ max _ delay ` , we
log a warning . This should perhaps be extended to abort sooner on
certain errors ."""
|
# outstanding request got errback ' d , clear it
self . _request_d = None
if self . _stopping and failure . check ( CancelledError ) : # Not really an error
return
# Do we need to abort ?
if ( self . request_retry_max_attempts != 0 and self . _fetch_attempt_count >= self . request_retry_max_attempts ) :
log . debug ( "%r: Exhausted attempts: %d fetching offset from kafka: %r" , self , self . request_retry_max_attempts , failure )
self . _start_d . errback ( failure )
return
# Decide how to log this failure . . . If we have retried so many times
# we ' re at the retry _ max _ delay , then we log at warning every other time
# debug otherwise
if ( self . retry_delay < self . retry_max_delay or 0 == ( self . _fetch_attempt_count % 2 ) ) :
log . debug ( "%r: Failure fetching offset from kafka: %r" , self , failure )
else : # We ' ve retried until we hit the max delay , log at warn
log . warning ( "%r: Still failing fetching offset from kafka: %r" , self , failure )
self . _retry_fetch ( )
|
def run ( bam_file , data , fastqc_out ) :
"""Run fastqc , generating report in specified directory and parsing metrics .
Downsamples to 10 million reads to avoid excessive processing times with large
files , unless we ' re running a Standard / smallRNA - seq / QC pipeline .
Handles fastqc 0.11 + , which use a single HTML file and older versions that use
a directory of files + images . The goal is to eventually move to only 0.11 +"""
|
sentry_file = os . path . join ( fastqc_out , "fastqc_report.html" )
if not os . path . exists ( sentry_file ) :
work_dir = os . path . dirname ( fastqc_out )
utils . safe_makedir ( work_dir )
ds_file = ( bam . downsample ( bam_file , data , 1e7 , work_dir = work_dir ) if data . get ( "analysis" , "" ) . lower ( ) not in [ "standard" , "smallrna-seq" ] else None )
if ds_file is not None :
bam_file = ds_file
frmt = "bam" if bam_file . endswith ( "bam" ) else "fastq"
fastqc_name = utils . splitext_plus ( os . path . basename ( bam_file ) ) [ 0 ]
fastqc_clean_name = dd . get_sample_name ( data )
num_cores = data [ "config" ] [ "algorithm" ] . get ( "num_cores" , 1 )
with tx_tmpdir ( data , work_dir ) as tx_tmp_dir :
with utils . chdir ( tx_tmp_dir ) :
cl = [ config_utils . get_program ( "fastqc" , data [ "config" ] ) , "-d" , tx_tmp_dir , "-t" , str ( num_cores ) , "--extract" , "-o" , tx_tmp_dir , "-f" , frmt , bam_file ]
cl = "%s %s %s" % ( utils . java_freetype_fix ( ) , utils . local_path_export ( ) , " " . join ( [ str ( x ) for x in cl ] ) )
do . run ( cl , "FastQC: %s" % dd . get_sample_name ( data ) )
tx_fastqc_out = os . path . join ( tx_tmp_dir , "%s_fastqc" % fastqc_name )
tx_combo_file = os . path . join ( tx_tmp_dir , "%s_fastqc.html" % fastqc_name )
if not os . path . exists ( sentry_file ) and os . path . exists ( tx_combo_file ) :
utils . safe_makedir ( fastqc_out )
# Use sample name for reports instead of bam file name
with open ( os . path . join ( tx_fastqc_out , "fastqc_data.txt" ) , 'r' ) as fastqc_bam_name , open ( os . path . join ( tx_fastqc_out , "_fastqc_data.txt" ) , 'w' ) as fastqc_sample_name :
for line in fastqc_bam_name :
fastqc_sample_name . write ( line . replace ( os . path . basename ( bam_file ) , fastqc_clean_name ) )
shutil . move ( os . path . join ( tx_fastqc_out , "_fastqc_data.txt" ) , os . path . join ( fastqc_out , 'fastqc_data.txt' ) )
shutil . move ( tx_combo_file , sentry_file )
if os . path . exists ( "%s.zip" % tx_fastqc_out ) :
shutil . move ( "%s.zip" % tx_fastqc_out , os . path . join ( fastqc_out , "%s.zip" % fastqc_clean_name ) )
elif not os . path . exists ( sentry_file ) :
raise ValueError ( "FastQC failed to produce output HTML file: %s" % os . listdir ( tx_tmp_dir ) )
logger . info ( "Produced HTML report %s" % sentry_file )
parser = FastQCParser ( fastqc_out , dd . get_sample_name ( data ) )
stats = parser . get_fastqc_summary ( )
parser . save_sections_into_file ( )
return stats
|
def transfer_learning_tuner ( self , additional_parents = None , estimator = None ) :
"""Creates a new ` ` HyperparameterTuner ` ` by copying the request fields from the provided parent to the new
instance of ` ` HyperparameterTuner ` ` . Followed by addition of warm start configuration with the type as
" TransferLearning " and parents as the union of provided list of ` ` additional _ parents ` ` and the ` ` self ` ` .
Also , training image in the new tuner ' s estimator is updated with the provided ` ` training _ image ` ` .
Args :
additional _ parents ( set { str } ) : Set of additional parents along with the self to be used in warm starting
the transfer learning tuner .
estimator ( sagemaker . estimator . EstimatorBase ) : An estimator object that has been initialized with
the desired configuration . There does not need to be a training job associated with this instance .
Returns :
sagemaker . tuner . HyperparameterTuner : ` ` HyperparameterTuner ` ` instance which can be used to launch transfer
learning tuning job .
Examples :
> > > parent _ tuner = HyperparameterTuner . attach ( tuning _ job _ name = " parent - job - 1 " )
> > > transfer _ learning _ tuner = parent _ tuner . transfer _ learning _ tuner ( additional _ parents = { " parent - job - 2 " } )
Later On :
> > > transfer _ learning _ tuner . fit ( inputs = { } )"""
|
return self . _create_warm_start_tuner ( additional_parents = additional_parents , warm_start_type = WarmStartTypes . TRANSFER_LEARNING , estimator = estimator )
|
def from_cli ( opt , dyn_range_fac = 1 , precision = 'single' , inj_filter_rejector = None ) :
"""Parses the CLI options related to strain data reading and conditioning .
Parameters
opt : object
Result of parsing the CLI with OptionParser , or any object with the
required attributes ( gps - start - time , gps - end - time , strain - high - pass ,
pad - data , sample - rate , ( frame - cache or frame - files ) , channel - name ,
fake - strain , fake - strain - seed , fake - strain - from - file , gating _ file ) .
dyn _ range _ fac : { float , 1 } , optional
A large constant to reduce the dynamic range of the strain .
precision : string
Precision of the returned strain ( ' single ' or ' double ' ) .
inj _ filter _ rejector : InjFilterRejector instance ; optional , default = None
If given send the InjFilterRejector instance to the inject module so
that it can store a reduced representation of injections if
necessary .
Returns
strain : TimeSeries
The time series containing the conditioned strain data ."""
|
gating_info = { }
if opt . frame_cache or opt . frame_files or opt . frame_type :
if opt . frame_cache :
frame_source = opt . frame_cache
if opt . frame_files :
frame_source = opt . frame_files
logging . info ( "Reading Frames" )
if hasattr ( opt , 'frame_sieve' ) and opt . frame_sieve :
sieve = opt . frame_sieve
else :
sieve = None
if opt . frame_type :
strain = pycbc . frame . query_and_read_frame ( opt . frame_type , opt . channel_name , start_time = opt . gps_start_time - opt . pad_data , end_time = opt . gps_end_time + opt . pad_data , sieve = sieve )
else :
strain = pycbc . frame . read_frame ( frame_source , opt . channel_name , start_time = opt . gps_start_time - opt . pad_data , end_time = opt . gps_end_time + opt . pad_data , sieve = sieve )
if opt . zpk_z and opt . zpk_p and opt . zpk_k :
logging . info ( "Highpass Filtering" )
strain = highpass ( strain , frequency = opt . strain_high_pass )
logging . info ( "Applying zpk filter" )
z = numpy . array ( opt . zpk_z )
p = numpy . array ( opt . zpk_p )
k = float ( opt . zpk_k )
strain = filter_zpk ( strain . astype ( numpy . float64 ) , z , p , k )
if opt . normalize_strain :
logging . info ( "Dividing strain by constant" )
l = opt . normalize_strain
strain = strain / l
if opt . injection_file :
logging . info ( "Applying injections" )
injector = InjectionSet ( opt . injection_file )
injections = injector . apply ( strain , opt . channel_name [ 0 : 2 ] , distance_scale = opt . injection_scale_factor , inj_filter_rejector = inj_filter_rejector )
if opt . sgburst_injection_file :
logging . info ( "Applying sine-Gaussian burst injections" )
injector = SGBurstInjectionSet ( opt . sgburst_injection_file )
injector . apply ( strain , opt . channel_name [ 0 : 2 ] , distance_scale = opt . injection_scale_factor )
logging . info ( "Highpass Filtering" )
strain = highpass ( strain , frequency = opt . strain_high_pass )
if precision == 'single' :
logging . info ( "Converting to float32" )
strain = ( strain * dyn_range_fac ) . astype ( pycbc . types . float32 )
elif precision == "double" :
logging . info ( "Converting to float64" )
strain = ( strain * dyn_range_fac ) . astype ( pycbc . types . float64 )
else :
raise ValueError ( "Unrecognized precision {}" . format ( precision ) )
if opt . gating_file is not None :
logging . info ( "Gating times contained in gating file" )
gate_params = numpy . loadtxt ( opt . gating_file )
if len ( gate_params . shape ) == 1 :
gate_params = [ gate_params ]
strain = gate_data ( strain , gate_params )
gating_info [ 'file' ] = [ gp for gp in gate_params if ( gp [ 0 ] + gp [ 1 ] + gp [ 2 ] >= strain . start_time ) and ( gp [ 0 ] - gp [ 1 ] - gp [ 2 ] <= strain . end_time ) ]
if opt . autogating_threshold is not None : # the + 0 is for making a copy
glitch_times = detect_loud_glitches ( strain + 0. , threshold = opt . autogating_threshold , cluster_window = opt . autogating_cluster , low_freq_cutoff = opt . strain_high_pass , high_freq_cutoff = opt . sample_rate / 2 , corrupt_time = opt . pad_data + opt . autogating_pad )
gate_params = [ [ gt , opt . autogating_width , opt . autogating_taper ] for gt in glitch_times ]
if len ( glitch_times ) > 0 :
logging . info ( 'Autogating at %s' , ', ' . join ( [ '%.3f' % gt for gt in glitch_times ] ) )
strain = gate_data ( strain , gate_params )
gating_info [ 'auto' ] = gate_params
logging . info ( "Resampling data" )
strain = resample_to_delta_t ( strain , 1.0 / opt . sample_rate , method = 'ldas' )
logging . info ( "Highpass Filtering" )
strain = highpass ( strain , frequency = opt . strain_high_pass )
if hasattr ( opt , 'witness_frame_type' ) and opt . witness_frame_type :
stilde = strain . to_frequencyseries ( )
import h5py
tf_file = h5py . File ( opt . witness_tf_file )
for key in tf_file :
witness = pycbc . frame . query_and_read_frame ( opt . witness_frame_type , str ( key ) , start_time = strain . start_time , end_time = strain . end_time )
witness = ( witness * dyn_range_fac ) . astype ( strain . dtype )
tf = pycbc . types . load_frequencyseries ( opt . witness_tf_file , group = key )
tf = tf . astype ( stilde . dtype )
flen = int ( opt . witness_filter_length * strain . sample_rate )
tf = pycbc . psd . interpolate ( tf , stilde . delta_f )
tf_time = tf . to_timeseries ( )
window = Array ( numpy . hanning ( flen * 2 ) , dtype = strain . dtype )
tf_time [ 0 : flen ] *= window [ flen : ]
tf_time [ len ( tf_time ) - flen : ] *= window [ 0 : flen ]
tf = tf_time . to_frequencyseries ( )
kmax = min ( len ( tf ) , len ( stilde ) - 1 )
stilde [ : kmax ] -= tf [ : kmax ] * witness . to_frequencyseries ( ) [ : kmax ]
strain = stilde . to_timeseries ( )
logging . info ( "Remove Padding" )
start = opt . pad_data * opt . sample_rate
end = len ( strain ) - opt . sample_rate * opt . pad_data
strain = strain [ start : end ]
if opt . fake_strain or opt . fake_strain_from_file :
logging . info ( "Generating Fake Strain" )
if not opt . low_frequency_cutoff :
raise ValueError ( 'Please provide low frequency cutoff to ' 'generate a fake strain' )
duration = opt . gps_end_time - opt . gps_start_time
tlen = duration * opt . sample_rate
pdf = 1.0 / 128
plen = int ( opt . sample_rate / pdf ) // 2 + 1
if opt . fake_strain_from_file :
logging . info ( "Reading ASD from file" )
strain_psd = pycbc . psd . from_txt ( opt . fake_strain_from_file , plen , pdf , opt . low_frequency_cutoff , is_asd_file = True )
elif opt . fake_strain != 'zeroNoise' :
logging . info ( "Making PSD for strain" )
strain_psd = pycbc . psd . from_string ( opt . fake_strain , plen , pdf , opt . low_frequency_cutoff )
if opt . fake_strain == 'zeroNoise' :
logging . info ( "Making zero-noise time series" )
strain = TimeSeries ( pycbc . types . zeros ( tlen ) , delta_t = 1.0 / opt . sample_rate , epoch = opt . gps_start_time )
else :
logging . info ( "Making colored noise" )
from pycbc . noise . reproduceable import colored_noise
lowfreq = opt . low_frequency_cutoff / 2.
strain = colored_noise ( strain_psd , opt . gps_start_time , opt . gps_end_time , seed = opt . fake_strain_seed , low_frequency_cutoff = lowfreq )
strain = resample_to_delta_t ( strain , 1.0 / opt . sample_rate )
if not opt . channel_name and ( opt . injection_file or opt . sgburst_injection_file ) :
raise ValueError ( 'Please provide channel names with the format ' 'ifo:channel (e.g. H1:CALIB-STRAIN) to inject ' 'simulated signals into fake strain' )
if opt . injection_file :
logging . info ( "Applying injections" )
injector = InjectionSet ( opt . injection_file )
injections = injector . apply ( strain , opt . channel_name [ 0 : 2 ] , distance_scale = opt . injection_scale_factor , inj_filter_rejector = inj_filter_rejector )
if opt . sgburst_injection_file :
logging . info ( "Applying sine-Gaussian burst injections" )
injector = SGBurstInjectionSet ( opt . sgburst_injection_file )
injector . apply ( strain , opt . channel_name [ 0 : 2 ] , distance_scale = opt . injection_scale_factor )
if precision == 'single' :
logging . info ( "Converting to float32" )
strain = ( dyn_range_fac * strain ) . astype ( pycbc . types . float32 )
elif precision == 'double' :
logging . info ( "Converting to float64" )
strain = ( dyn_range_fac * strain ) . astype ( pycbc . types . float64 )
else :
raise ValueError ( "Unrecognized precision {}" . format ( precision ) )
if opt . taper_data :
logging . info ( "Tapering data" )
# Use auto - gating stuff for this , a one - sided gate is a taper
pd_taper_window = opt . taper_data
gate_params = [ ( strain . start_time , 0. , pd_taper_window ) ]
gate_params . append ( ( strain . end_time , 0. , pd_taper_window ) )
gate_data ( strain , gate_params )
if opt . injection_file :
strain . injections = injections
strain . gating_info = gating_info
return strain
|
def get_catalogs ( portal ) :
"""Returns the catalogs from the site"""
|
res = [ ]
for object in portal . objectValues ( ) :
if ICatalogTool . providedBy ( object ) :
res . append ( object )
elif IZCatalog . providedBy ( object ) :
res . append ( object )
res . sort ( )
return res
|
def find_children ( self , linespec ) :
"""Find lines and immediate children that match the linespec regex .
: param linespec : regular expression of line to match
: returns : list of lines . These correspond to the lines that were
matched and their immediate children"""
|
res = [ ]
for parent in self . find_objects ( linespec ) :
res . append ( parent . line )
res . extend ( [ child . line for child in parent . children ] )
return res
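
A hypothetical usage sketch; ConfigParserClass stands in for whatever class defines find_children/find_objects above, and the indented configuration text is illustrative.

config = (
    "interface GigabitEthernet0/1\n"
    " description uplink\n"
    " ip address 10.0.0.1 255.255.255.0\n"
    "interface GigabitEthernet0/2\n"
    " shutdown\n"
)
parser = ConfigParserClass(config)   # hypothetical constructor
print(parser.find_children(r'^interface GigabitEthernet0/1'))
# expected: the matched interface line followed by its two child lines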
|
def projected_gradient_descent ( model_fn , x , eps , eps_iter , nb_iter , ord , clip_min = None , clip_max = None , y = None , targeted = False , rand_init = None , rand_minmax = 0.3 , sanity_checks = True ) :
"""This class implements either the Basic Iterative Method
( Kurakin et al . 2016 ) when rand _ init is set to 0 . or the
Madry et al . ( 2017 ) method when rand _ minmax is larger than 0.
Paper link ( Kurakin et al . 2016 ) : https : / / arxiv . org / pdf / 1607.02533 . pdf
Paper link ( Madry et al . 2017 ) : https : / / arxiv . org / pdf / 1706.06083 . pdf
: param model _ fn : a callable that takes an input tensor and returns the model logits .
: param x : input tensor .
: param eps : epsilon ( input variation parameter ) ; see https : / / arxiv . org / abs / 1412.6572.
: param eps _ iter : step size for each attack iteration
: param nb _ iter : Number of attack iterations .
: param ord : Order of the norm ( mimics NumPy ) . Possible values : np . inf , 1 or 2.
: param clip _ min : ( optional ) float . Minimum float value for adversarial example components .
: param clip _ max : ( optional ) float . Maximum float value for adversarial example components .
: param y : ( optional ) Tensor with true labels . If targeted is true , then provide the
target label . Otherwise , only provide this parameter if you ' d like to use true
labels when crafting adversarial samples . Otherwise , model predictions are used
as labels to avoid the " label leaking " effect ( explained in this paper :
https : / / arxiv . org / abs / 1611.01236 ) . Default is None .
: param targeted : ( optional ) bool . Is the attack targeted or untargeted ?
Untargeted , the default , will try to make the label incorrect .
Targeted will instead try to move in the direction of being more like y .
: param sanity _ checks : bool , if True , include asserts ( Turn them off to use less runtime /
memory or for unit tests that intentionally pass strange input )
: return : a tensor for the adversarial example"""
|
assert eps_iter <= eps , ( eps_iter , eps )
if ord == 1 :
raise NotImplementedError ( "It's not clear that FGM is a good inner loop" " step for PGD when ord=1, because ord=1 FGM " " changes only one pixel at a time. We need " " to rigorously test a strong ord=1 PGD " "before enabling this feature." )
if ord not in [ np . inf , 2 ] :
raise ValueError ( "Norm order must be either np.inf or 2." )
asserts = [ ]
# If a data range was specified , check that the input was in that range
if clip_min is not None :
asserts . append ( tf . math . greater_equal ( x , clip_min ) )
if clip_max is not None :
asserts . append ( tf . math . less_equal ( x , clip_max ) )
# Initialize loop variables
if rand_init :
rand_minmax = eps
eta = tf . random . uniform ( x . shape , - rand_minmax , rand_minmax )
else :
eta = tf . zeros_like ( x )
# Clip eta
eta = clip_eta ( eta , ord , eps )
adv_x = x + eta
if clip_min is not None or clip_max is not None :
adv_x = tf . clip_by_value ( adv_x , clip_min , clip_max )
if y is None : # Using model predictions as ground truth to avoid label leaking
y = tf . argmax ( model_fn ( x ) , 1 )
i = 0
while i < nb_iter :
adv_x = fast_gradient_method ( model_fn , adv_x , eps_iter , ord , clip_min = clip_min , clip_max = clip_max , y = y , targeted = targeted )
# Clipping perturbation eta to ord norm ball
eta = adv_x - x
eta = clip_eta ( eta , ord , eps )
adv_x = x + eta
# Redo the clipping .
# FGM already did it , but subtracting and re - adding eta can add some
# small numerical error .
if clip_min is not None or clip_max is not None :
adv_x = tf . clip_by_value ( adv_x , clip_min , clip_max )
i += 1
asserts . append ( eps_iter <= eps )
if ord == np . inf and clip_min is not None : # TODO necessary to cast to x . dtype ?
asserts . append ( eps + clip_min <= clip_max )
if sanity_checks :
assert np . all ( asserts )
return adv_x
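
A hypothetical usage sketch in eager TF2, assuming the helpers referenced above (fast_gradient_method, clip_eta) are importable from the same package; the tiny untrained model and random inputs are placeholders.

import numpy as np
import tensorflow as tf

model_fn = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(10),
])
x = tf.random.uniform((8, 28, 28), 0.0, 1.0)

adv_x = projected_gradient_descent(
    model_fn, x,
    eps=0.3, eps_iter=0.01, nb_iter=40, ord=np.inf,
    clip_min=0.0, clip_max=1.0, rand_init=True, sanity_checks=False)

print(float(tf.reduce_max(tf.abs(adv_x - x))))   # perturbation stays within eps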
|
def _terminate ( self , level : int ) -> bool :
"""Returns : succeeded in * attempting * a kill ?"""
|
if not self . running :
return True
# Already closed by itself ?
try :
self . wait ( 0 )
return True
except subprocess . TimeoutExpired : # failed to close
pass
# SEE NOTES ABOVE . This is tricky under Windows .
suffix = " [to child process {}]" . format ( self . process . pid )
if level == self . KILL_LEVEL_CTRL_C_OR_SOFT_KILL :
if WINDOWS :
success = 0 != ctypes . windll . kernel32 . GenerateConsoleCtrlEvent ( CTRL_C_EVENT , self . process . pid )
if success :
self . info ( "Sent CTRL-C to request stop" + suffix )
# . . . but that doesn ' t mean it ' ll stop . . .
else :
self . info ( "Failed to send CTRL-C" + suffix )
return success
else :
self . warning ( "Asking process to stop (SIGTERM)" + suffix )
self . process . terminate ( )
# soft kill under POSIX
return True
elif level == self . KILL_LEVEL_CTRL_BREAK :
if not WINDOWS :
return False
success = 0 != ctypes . windll . kernel32 . GenerateConsoleCtrlEvent ( CTRL_BREAK_EVENT , self . process . pid )
if success :
self . info ( "Sent CTRL-BREAK to request stop" + suffix )
else :
self . info ( "Failed to send CTRL-BREAK" + suffix )
return success
elif level == self . KILL_LEVEL_TASKKILL :
if not WINDOWS :
return False
retcode = self . _taskkill ( force = False )
# does its own info messages
return retcode == winerror . ERROR_SUCCESS
elif level == self . KILL_LEVEL_TASKKILL_FORCE :
if not WINDOWS :
return False
retcode = self . _taskkill ( force = True )
# does its own info messages
return retcode == winerror . ERROR_SUCCESS
elif level == self . KILL_LEVEL_HARD_KILL : # Last resort
self . _kill ( )
# may do TASKKILL / F or some other method
return True
else :
raise ValueError ( "Bad kill level requested" )
|
def post_order ( parent ) :
"""Post order a forest ."""
|
n = len ( parent )
k = 0
p = matrix ( 0 , ( n , 1 ) )
head = matrix ( - 1 , ( n , 1 ) )
next = matrix ( 0 , ( n , 1 ) )
stack = matrix ( 0 , ( n , 1 ) )
for j in range ( n - 1 , - 1 , - 1 ) :
if ( parent [ j ] == j ) :
continue
next [ j ] = head [ parent [ j ] ]
head [ parent [ j ] ] = j
for j in range ( n ) :
if ( parent [ j ] != j ) :
continue
k = __tdfs ( j , k , head , next , p , stack )
return p
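
A hypothetical usage sketch, assuming matrix is cvxopt.matrix (as in the body above) and that the private helper __tdfs is available in the same module.

from cvxopt import matrix

# parent[j] == j marks a root; this forest has roots 0 and 3
parent = matrix([0, 0, 1, 3, 3], (5, 1))
perm = post_order(parent)
print(list(perm))   # a post-ordering of the two trees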
|
def decorate_disabled ( self ) :
"""Return True if this decoration must be omitted , otherwise - False .
This class searches for tags values in environment variable
( : attr : ` . Verifier . _ _ environment _ var _ _ ` ) , Derived class can implement any logic
: return : bool"""
|
if len ( self . _tags ) == 0 :
return False
if self . _env_var not in os . environ :
return True
env_tags = os . environ [ self . _env_var ] . split ( self . __class__ . __tags_delimiter__ )
if '*' in env_tags :
return False
for tag in self . _tags :
if tag in env_tags :
return False
return True
|
def _import_parsers ( ) :
"""Lazy imports to prevent circular dependencies between this module and utils"""
|
global ARCGIS_NODES
global ARCGIS_ROOTS
global ArcGISParser
global FGDC_ROOT
global FgdcParser
global ISO_ROOTS
global IsoParser
global VALID_ROOTS
if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None :
from gis_metadata . arcgis_metadata_parser import ARCGIS_NODES
from gis_metadata . arcgis_metadata_parser import ARCGIS_ROOTS
from gis_metadata . arcgis_metadata_parser import ArcGISParser
if FGDC_ROOT is None or FgdcParser is None :
from gis_metadata . fgdc_metadata_parser import FGDC_ROOT
from gis_metadata . fgdc_metadata_parser import FgdcParser
if ISO_ROOTS is None or IsoParser is None :
from gis_metadata . iso_metadata_parser import ISO_ROOTS
from gis_metadata . iso_metadata_parser import IsoParser
if VALID_ROOTS is None :
VALID_ROOTS = { FGDC_ROOT } . union ( ARCGIS_ROOTS + ISO_ROOTS )
|
def get_content_models ( self ) :
"""Return all subclasses that are admin registered ."""
|
models = [ ]
for model in self . concrete_model . get_content_models ( ) :
try :
admin_url ( model , "add" )
except NoReverseMatch :
continue
else :
setattr ( model , "meta_verbose_name" , model . _meta . verbose_name )
setattr ( model , "add_url" , admin_url ( model , "add" ) )
models . append ( model )
return models
|