signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def _get_cursor(self, n_retries=1):
    """Return a context manager for a cursor obtained from a single or pooled
    connection, and set the PostgreSQL search_path to the schema specified
    in the connection URL.

    Although *connections* are threadsafe, *cursors* are bound to
    connections and are *not* threadsafe.  Do not share cursors across
    threads.

    Use this function like this::

        with hdp._get_cursor() as cur:
            # your code

    Do not call this function outside a contextmanager.

    :param n_retries: number of reconnect attempts after the initial try
        fails with an OperationalError.
    :raises HGVSError: when every attempt to (re)connect has failed.
    """
    # One initial attempt plus n_retries reconnect attempts.
    n_tries_rem = n_retries + 1
    while n_tries_rem > 0:
        try:
            conn = self._pool.getconn() if self.pooling else self._conn
            # autocommit = True obviates closing explicitly
            conn.autocommit = True
            cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            cur.execute("set search_path = {self.url.schema};".format(self=self))
            yield cur
            # contextmanager executes these when context exits
            cur.close()
            if self.pooling:
                self._pool.putconn(conn)
            break
        except psycopg2.OperationalError:
            _logger.warning("Lost connection to {url}; attempting reconnect".format(url=self.url))
            if self.pooling:
                self._pool.closeall()
            self._connect()
            _logger.warning("Reconnected to {url}".format(url=self.url))
            n_tries_rem -= 1
    else:
        # while/else: reached only when every attempt failed without break.
        # N.B. Probably never reached in practice.
        raise HGVSError("Permanently lost connection to {url} ({n} retries)".format(url=self.url, n=n_retries))
|
def debug_async(self, conn_id, cmd_name, cmd_args, progress_callback, callback):
    """Asynchronously complete a named debug command.

    This adapter does not implement debug commands, so the callback is
    invoked immediately with a failure result.

    Args:
        conn_id (int): A unique identifier that will refer to this connection.
        cmd_name (string): the name of the debug command we want to invoke.
        cmd_args (dict): any arguments that we want to send with this command.
        progress_callback (callable): A function to be called with status on
            our progress, called as: progress_callback(done_count, total_count)
        callback (callable): A callback for when we have finished the debug
            command, called as:
            callback(connection_id, adapter_id, success, retval, failure_reason)
    """
    failure_reason = "Debug commands are not supported by this DeviceAdapter"
    callback(conn_id, self.id, False, None, failure_reason)
|
def create_ipsecpolicy(name, profile=None, **kwargs):
    '''Creates a new IPsecPolicy

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_ipsecpolicy ipsecpolicy-name
                transform_protocol=esp auth_algorithm=sha1
                encapsulation_mode=tunnel encryption_algorithm=aes-128

    :param name: Name of the IPSec policy
    :param transform_protocol: Transform protocol in lowercase,
        default: esp (Optional)
    :param auth_algorithm: Authentication algorithm in lowercase,
        default: sha1 (Optional)
    :param encapsulation_mode: Encapsulation mode in lowercase,
        default: tunnel (Optional)
    :param encryption_algorithm: Encryption algorithm in lowercase,
        default: aes-128 (Optional)
    :param pfs: Perfect Forward Secrecy in lowercase,
        default: group5 (Optional)
    :param units: IPSec lifetime attribute. default: seconds (Optional)
    :param value: IPSec lifetime attribute. default: 3600 (Optional)
    :param profile: Profile to build on (Optional)
    :return: Created IPSec policy information
    '''
    neutron_interface = _auth(profile)
    return neutron_interface.create_ipsecpolicy(name, **kwargs)
|
def update(self, table_name, where_slice, new_values):
    """Update all rows of *table_name* matching *where_slice*.

    :param table_name: name of the table to update.
    :param where_slice: dict used to match rows, e.g. ``{"id": 42}``;
        a ``None`` value matches SQL ``IS NULL``.
    :param new_values: dict of column-name/column-value pairs to SET.
    """
    new_values = quote_param(new_values)
    # Use an identity test for None: ``v != None`` invokes ``__ne__``,
    # which wrapper value types may override.
    where_clause = SQL_AND.join(
        quote_column(k) + "=" + quote_value(v) if v is not None
        else quote_column(k) + SQL_IS_NULL
        for k, v in where_slice.items()
    )
    command = (
        "UPDATE " + quote_column(table_name) + "\n" +
        "SET " + sql_list([quote_column(k) + "=" + v for k, v in new_values.items()]) +
        SQL_WHERE + where_clause
    )
    self.execute(command, {})
|
def get_next_step(self):
    """Find the proper step when user clicks the Next button.

    :returns: The step to be switched to.
    :rtype: WizardStep instance or None
    """
    subcategory = self.parent.step_kw_subcategory.selected_subcategory()
    layer_is_raster = is_raster_layer(self.parent.layer)
    classifications = get_classifications(subcategory['key'])
    # Vector layers go to the field selection step.
    if not layer_is_raster:
        return self.parent.step_kw_field
    # Raster layers with classifications go to multi-classification.
    if classifications:
        return self.parent.step_kw_multi_classifications
    # Otherwise go straight to the source step.
    return self.parent.step_kw_source
|
async def apply_commandline(self, cmdline):
    """Interpret a command line string.

    Splits it into separate command strings, instantiates
    :class:`Commands <alot.commands.Command>` accordingly and applies
    them in sequence.

    :param cmdline: command line to interpret
    :type cmdline: str
    """
    # remove initial spaces
    cmdline = cmdline.lstrip()

    def _run_single(cmdstring):
        # Translate one command string into a Command for the current
        # mode and apply it.  The full command line is remembered for
        # the 'repeat' command when the command allows repetition.
        logging.debug('%s command string: "%s"', self.mode, str(cmdstring))
        cmd = commandfactory(cmdstring, self.mode)
        if cmd.repeatable:
            self.last_commandline = cmdline
        return self.apply_command(cmd)

    try:
        # Apply commands one by one; awaiting each keeps them strictly
        # sequential even when a command triggers asynchronous work.
        for part in split_commandline(cmdline):
            await _run_single(part)
    except Exception as e:
        self._error_handler(e)
|
def run_directive(self, name, arguments=None, options=None, content=None):
    """Generate directive node given arguments.

    Parameters
    ----------
    name : str
        name of directive.
    arguments : list
        list of positional arguments.
    options : dict
        key value arguments.
    content : content
        content of the directive

    Returns
    -------
    node : docutil Node
        Node generated by the arguments.
    """
    arguments = [] if arguments is None else arguments
    options = {} if options is None else options
    content = [] if content is None else content
    directive_class, _ = directive(name, self.language, self.document)
    instance = directive_class(
        name=name,
        arguments=arguments,
        options=options,
        content=content,
        lineno=self.node.line,
        content_offset=0,
        block_text='Dummy BlockText',
        state=self.state,
        state_machine=self,
    )
    return instance.run()
|
def getRow(self, key):
    """Get a row by value of the indexing columns.  If the index is not
    specified, gets the only row of a dataframe with no indexing columns.

    Args:
        key: Tuple representing the index of the desired row.

    Returns:
        The row.
    """
    impl_key = Tuple(key)._impl
    return Row(self._impl.getRow(impl_key))
|
def from_dict(self, data):
    """Fill the current object with information from the specification.

    :param data: dict with optional ``model``, ``attributes`` and
        ``children`` sections; missing model keys fall back to the
        documented defaults (None, [] or False).
    """
    if "model" in data:
        model = data["model"]
        # dict.get replaces the long "x if k in model else default" chains.
        self.description = model.get("description")
        self.package = model.get("package")
        self.extends = model.get("extends", [])
        self.entity_name = model.get("entity_name")
        self.rest_name = model.get("rest_name")
        self.resource_name = model.get("resource_name")
        self.allows_get = model.get("get", False)
        self.allows_create = model.get("create", False)
        self.allows_update = model.get("update", False)
        self.allows_delete = model.get("delete", False)
        self.is_root = model.get("root", False)
        self.userlabel = model.get("userlabel")
        self.template = model.get("template", False)
        self.allowed_job_commands = model.get("allowed_job_commands")
    if "attributes" in data:
        self.attributes = self._get_attributes(data["attributes"])
    if "children" in data:
        self.child_apis = self._get_apis(data["children"])
|
def get_filters(self):
    """Return the collection of momentjs filters keyed by filter name."""
    return {
        'moment_format': self.format,
        'moment_calendar': self.calendar,
        'moment_fromnow': self.from_now,
    }
|
def reference(self, symbol, count=1):
    """Record *count* references to *symbol*.

    References to the catch symbol are tallied locally; anything else
    is delegated up to the parent scope.
    """
    if symbol != self.catch_symbol:
        self.parent.reference(symbol, count)
    else:
        self.catch_symbol_usage += count
|
def make_regex(separator):
    """Utility function to create regexp for matching escaped separators
    in strings.

    The returned pattern captures runs of characters that are neither the
    separator nor a backslash, allowing backslash-escaped characters
    (including escaped separators) inside a run.
    """
    escaped = re.escape(separator)
    return re.compile(r'(?:' + escaped + r')?((?:[^' + escaped + r'\\]|\\.)+)')
|
def replace(cls, fileobj, old_pages, new_pages):
    """Replace old_pages with new_pages within fileobj.

    old_pages must have come from reading fileobj originally.
    new_pages are assumed to have the 'same' data as old_pages,
    and so the serial and sequence numbers will be copied, as will
    the flags for the first and last pages.

    fileobj will be resized and pages renumbered as necessary.  As
    such, it must be opened r+b or w+b.
    """
    if not len(old_pages) or not len(new_pages):
        raise ValueError("empty pages list not allowed")
    # Number the new pages starting from the first old page.
    first = old_pages[0].sequence
    for page, seq in izip(new_pages, xrange(first, first + len(new_pages))):
        page.sequence = seq
        page.serial = old_pages[0].serial
    # Copy stream-boundary flags from the corresponding old pages.
    new_pages[0].first = old_pages[0].first
    new_pages[0].last = old_pages[0].last
    new_pages[0].continued = old_pages[0].continued
    new_pages[-1].first = old_pages[-1].first
    new_pages[-1].last = old_pages[-1].last
    new_pages[-1].complete = old_pages[-1].complete
    if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
        # A single incomplete packet: mark granule position as "no packet
        # finishes on this page".
        new_pages[-1].position = -1
    new_data = [cls.write(p) for p in new_pages]
    # Add dummy data or merge the remaining data together so multiple
    # new pages replace an old one.
    pages_diff = len(old_pages) - len(new_data)
    if pages_diff > 0:
        new_data.extend([b""] * pages_diff)
    elif pages_diff < 0:
        new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])]
    # Replace pages one by one.  If the sizes match no resize happens.
    offset_adjust = 0
    new_data_end = None
    assert len(old_pages) == len(new_data)
    for old_page, data in izip(old_pages, new_data):
        # Offsets shift as earlier pages grow or shrink.
        offset = old_page.offset + offset_adjust
        data_size = len(data)
        resize_bytes(fileobj, old_page.size, data_size, offset)
        fileobj.seek(offset, 0)
        fileobj.write(data)
        new_data_end = offset + data_size
        offset_adjust += (data_size - old_page.size)
    # Finally, if there's any discrepancy in length, we need to
    # renumber the pages for the logical stream.
    if len(old_pages) != len(new_pages):
        fileobj.seek(new_data_end, 0)
        serial = new_pages[-1].serial
        sequence = new_pages[-1].sequence + 1
        cls.renumber(fileobj, serial, sequence)
|
def split_string(str_src, spliters=None, elim_empty=False):
    # type: (AnyStr, Union[AnyStr, List[AnyStr], None], bool) -> List[AnyStr]
    """Split string by split character space (' ') and indent ('\\t') as default.

    Repeatedly applies each splitter in turn until the result stops
    changing (a fixed point), stripping every fragment.

    Examples:
        >>> StringClass.split_string('exec -ini test.ini', ' ')
        ['exec', '-ini', 'test.ini']

    Args:
        str_src: source string
        spliters: e.g. [' ', '\\t'], [], ' ', None
        elim_empty: Eliminate empty (i.e., '') fragments or not.

    Returns:
        split sub-strings as list
    """
    # A single splitter string is promoted to a one-element list.
    if is_string(spliters):
        spliters = [spliters]
    if spliters is None or not spliters:
        spliters = [' ', '\t']
    dest_strs = list()
    src_strs = [str_src]
    while True:
        old_dest_strs = src_strs[:]
        for s in spliters:
            for src_s in src_strs:
                temp_strs = src_s.split(s)
                for temp_s in temp_strs:
                    temp_s = temp_s.strip()
                    if temp_s == '' and elim_empty:
                        continue
                    if is_string(temp_s):
                        temp_s = str(temp_s)
                    dest_strs.append(temp_s)
            # NOTE(review): the swap below is placed inside the per-splitter
            # loop so each splitter operates on the previous splitter's
            # output — the original indentation was ambiguous; confirm.
            src_strs = dest_strs[:]
            dest_strs = list()
        # Stop once a full pass over all splitters changes nothing.
        if old_dest_strs == src_strs:
            dest_strs = src_strs[:]
            break
    return dest_strs
|
def discover_by_file(self, start_filepath, top_level_directory=None):
    """Run test discovery on a single file.

    Parameters
    ----------
    start_filepath : str
        The module file in which to start test discovery.
    top_level_directory : str
        The path to the top-level directory of the project.  This is
        the parent directory of the project's top-level Python
        package.
    """
    start_filepath = os.path.abspath(start_filepath)
    start_directory = os.path.dirname(start_filepath)
    if top_level_directory is None:
        top_level_directory = find_top_level_directory(start_directory)
    # BUG FIX: the second %r placeholder was missing, so the second
    # argument had no conversion and logging raised a formatting error.
    logger.debug(
        'Discovering tests in file: start_filepath=%r, '
        'top_level_directory=%r', start_filepath, top_level_directory)
    assert_start_importable(top_level_directory, start_directory)
    if top_level_directory not in sys.path:
        sys.path.insert(0, top_level_directory)
    tests = self._load_from_file(start_filepath, top_level_directory)
    return self._loader.create_suite(list(tests))
|
def get_all_profiles(store='local'):
    '''Gets all properties for all profiles in the specified store

    Args:
        store (str):
            The store to use. This is either the local firewall policy or the
            policy defined by local group policy. Valid options are:
            - lgpo
            - local
            Default is ``local``

    Returns:
        dict: A dictionary containing the specified settings for each profile
    '''
    return {
        '{0} Profile'.format(name.capitalize()): get_all_settings(profile=name, store=store)
        for name in ('domain', 'private', 'public')
    }
|
def delete(self, symbol, date_range=None):
    """Delete all chunks for a symbol.

    Which are, for the moment, fully contained in the passed in
    date_range.

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    date_range : `date.DateRange`
        DateRange to delete ticks in
    """
    query = {SYMBOL: symbol}
    date_range = to_pandas_closed_closed(date_range)
    if date_range is None:
        # Complete deletion: remove the symbol's metadata document too.
        self._metadata.delete_one({SYMBOL: symbol})
    else:
        assert date_range.start and date_range.end
        query[START] = {'$gte': date_range.start}
        query[END] = {'$lte': date_range.end}
    return self._collection.delete_many(query)
|
def get_models(self, uniprot_acc):
    """Return all available models for a UniProt accession number.

    Args:
        uniprot_acc (str): UniProt ACC/ID

    Returns:
        dict: All available models in SWISS-MODEL for this UniProt entry,
        or None (with an error logged) when the entry is unknown.
    """
    try:
        return self.all_models[uniprot_acc]
    except KeyError:
        log.error('{}: no SWISS-MODELs available'.format(uniprot_acc))
        return None
|
def assemble_tlg_author_filepaths():
    """Reads TLG index and builds a list of absolute filepaths."""
    plaintext_dir = os.path.expanduser('~/cltk_data/greek/text/tlg/plaintext/')
    return [os.path.join(plaintext_dir, author_id + '.TXT') for author_id in TLG_INDEX]
|
def create_streaming_endpoint(access_token, name, description="New Streaming Endpoint", scale_units="1"):
    '''Create Media Service Streaming Endpoint.

    Args:
        access_token (str): A valid Azure authentication token.
        name (str): A Media Service Streaming Endpoint Name.
        description (str): A Media Service Streaming Endpoint Description.
        scale_units (str): A Media Service Scale Units Number.

    Returns:
        HTTP response. JSON body.
    '''
    # Build the body with json.dumps instead of string concatenation so
    # that quotes/backslashes in name or description are escaped properly
    # and cannot break (or inject into) the JSON payload.
    import json

    client_access_policy = (
        '<access-policy><cross-domain-access><policy>'
        '<allow-from http-request-headers="*"><domain uri="http://*" /></allow-from>'
        '<grant-to><resource path="/" include-subpaths="false" /></grant-to>'
        '</policy></cross-domain-access></access-policy>'
    )
    cross_domain_policy = (
        '<?xml version="1.0"?><!DOCTYPE cross-domain-policy SYSTEM '
        '"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">'
        '<cross-domain-policy><allow-access-from domain="*" /></cross-domain-policy>'
    )
    path = '/StreamingEndpoints'
    endpoint = ''.join([ams_rest_endpoint, path])
    body = json.dumps({
        "Id": None,
        "Name": name,
        "Description": description,
        "Created": "0001-01-01T00:00:00",
        "LastModified": "0001-01-01T00:00:00",
        "State": None,
        "HostName": None,
        "ScaleUnits": scale_units,
        "CrossSiteAccessPolicies": {
            "ClientAccessPolicy": client_access_policy,
            "CrossDomainPolicy": cross_domain_policy,
        },
    })
    return do_ams_post(endpoint, path, body, access_token)
|
def fit(self, vxvv, vxvv_err=None, pot=None, radec=False, lb=False, customsky=False, lb_to_customsky=None, pmllpmbb_to_customsky=None, tintJ=10, ntintJ=1000, integrate_method='dopr54_c', **kwargs):
    """NAME:

       fit

    PURPOSE:

       fit an Orbit to data using the current orbit as the initial condition

    INPUT:

       vxvv - [:,6] array of positions and velocities along the orbit (if not lb=True or radec=True, these need to be in natural units [/ro,/vo], cannot be Quantities)

       vxvv_err= [:,6] array of errors on positions and velocities along the orbit (if None, these are set to 0.01) (if not lb=True or radec=True, these need to be in natural units [/ro,/vo], cannot be Quantities)

       pot= Potential to fit the orbit in

       Keywords related to the input data:

          radec= if True, input vxvv and vxvv_err are [ra,dec,d,mu_ra,mu_dec,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (all J2000.0; mu_ra = mu_ra * cos dec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates

          lb= if True, input vxvv and vxvv_err are [long,lat,d,mu_ll,mu_bb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates

          customsky= if True, input vxvv and vxvv_err are [custom long,custom lat,d,mu_customll,mu_custombb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat) where custom longitude and custom latitude are a custom set of sky coordinates (e.g., ecliptic) and the proper motions are also expressed in these coordinates; you need to provide the functions lb_to_customsky and pmllpmbb_to_customsky to convert to the custom sky coordinates (these should have the same inputs and outputs as lb_to_radec and pmllpmbb_to_pmrapmdec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates

          obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer (in kpc and km/s; entries can be Quantity) (default=Object-wide default); cannot be an Orbit instance; Y is ignored and always assumed to be zero

          ro= distance in kpc corresponding to R=1. (default: taken from object; can be Quantity)

          vo= velocity in km/s corresponding to v=1. (default: taken from object; can be Quantity)

          lb_to_customsky= function that converts l,b,degree=False to the custom sky coordinates (like lb_to_radec); needs to be given when customsky=True

          pmllpmbb_to_customsky= function that converts pmll,pmbb,l,b,degree=False to proper motions in the custom sky coordinates (like pmllpmbb_to_pmrapmdec); needs to be given when customsky=True

       Keywords related to the orbit integrations:

          tintJ= (default: 10) time to integrate orbits for fitting the orbit (can be Quantity)

          ntintJ= (default: 1000) number of time-integration points

          integrate_method= (default: 'dopr54_c') integration method to use

          disp= (False) display the optimizer's convergence message

    OUTPUT:

       max of log likelihood

    HISTORY:

       2014-06-17 - Written - Bovy (IAS)
    """
    # Normalize/validate the potential before delegating the actual fit
    # to the underlying orbit implementation.
    pot = flatten_potential(pot)
    _check_potential_dim(self, pot)
    _check_consistent_units(self, pot)
    return self._orb.fit(vxvv, vxvv_err=vxvv_err, pot=pot, radec=radec, lb=lb, customsky=customsky, lb_to_customsky=lb_to_customsky, pmllpmbb_to_customsky=pmllpmbb_to_customsky, tintJ=tintJ, ntintJ=ntintJ, integrate_method=integrate_method, **kwargs)
|
def _template(node_id, value=None):
    "Check if a template is assigned to it and render that with the value"
    select_template_from_node = fetch_query_string('select_template_from_node.sql')
    try:
        result = db.execute(text(select_template_from_node), node_id=node_id)
        template_result = result.fetchone()
        result.close()
        if template_result and template_result['name']:
            template = template_result['name']
            # Dict values are expanded into template keyword arguments;
            # anything else is passed through as ``value``.
            if isinstance(value, dict):
                return render_template(template, **value)
            return render_template(template, value=value)
    except DatabaseError as err:
        current_app.logger.error("DatabaseError: %s", err)
    # No template assigned to this node so just return the value
    return value
|
def _format_comments(ret, comments):
    '''DRY code for joining comments together and conditionally adding a period at
    the end, and adding this comment string to the state return dict.'''
    if isinstance(comments, six.string_types):
        joined = comments
    else:
        joined = '. '.join(comments)
        # Multiple comments read as sentences, so close the last one.
        if len(comments) > 1:
            joined += '.'
    ret['comment'] = joined
    return ret
|
def _get_fd ( fileobj ) :
"""Get a descriptor out of a file object .
: param fileobj :
An integer ( existing descriptor ) or any object having the ` fileno ( ) `
method .
: raises ValueError :
if the descriptor cannot be obtained or if the descriptor is invalid
: returns :
file descriptor number"""
|
if isinstance ( fileobj , int ) :
fd = fileobj
else :
try :
fd = fileobj . fileno ( )
except AttributeError :
fd = None
if fd is None or fd < 0 :
raise ValueError ( "invalid fileobj: {!r}" . format ( fileobj ) )
return fd
|
def _getZoomLevelRange ( self , resolution , unit = 'meters' ) :
"Return lower and higher zoom level given a resolution"
|
assert unit in ( 'meters' , 'degrees' )
if unit == 'meters' and self . unit == 'degrees' :
resolution = resolution / self . metersPerUnit
elif unit == 'degrees' and self . unit == 'meters' :
resolution = resolution * EPSG4326_METERS_PER_UNIT
lo = 0
hi = len ( self . RESOLUTIONS )
while lo < hi :
mid = ( lo + hi ) // 2
if resolution > self . RESOLUTIONS [ mid ] :
hi = mid
else :
lo = mid + 1
return lo , hi
|
def jarsign(storepass, keypass, keystore, source, alias, path=None):
    """Uses Jarsign to sign an apk target file using the provided keystore information.

    :param storepass (str) - keystore storepass
    :param keypass (str) - keystore keypass
    :param keystore (str) - keystore file path
    :param source (str) - apk path
    :param alias (str) - keystore alias
    :param path (str) - basedir to run the command
    """
    cmd = [
        'jarsigner', '-verbose',
        '-storepass', storepass,
        '-keypass', keypass,
        '-keystore', keystore,
        source, alias,
    ]
    common.run_cmd(cmd, log='jarsign.log', cwd=path)
|
def routingAreaUpdateRequest(PTmsiSignature_presence=0, GprsTimer_presence=0, DrxParameter_presence=0, TmsiStatus_presence=0):
    """ROUTING AREA UPDATE REQUEST Section 9.4.14

    Builds the mandatory IEs and appends each optional IE whose
    ``*_presence`` flag equals 1.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x8)  # 00001000
    c = UpdateTypeAndCiphKeySeqNr()
    e = RoutingAreaIdentification()
    f = MsNetworkCapability()
    packet = a / b / c / e / f
    # BUG FIX: ``is 1`` compared identity with an int literal (relies on
    # CPython small-int caching, SyntaxWarning on 3.8+); use ``== 1``.
    if PTmsiSignature_presence == 1:
        g = PTmsiSignature(ieiPTS=0x19)
        packet = packet / g
    if GprsTimer_presence == 1:
        h = GprsTimer(ieiGT=0x17)
        packet = packet / h
    if DrxParameter_presence == 1:
        i = DrxParameter(ieiDP=0x27)
        packet = packet / i
    if TmsiStatus_presence == 1:
        j = TmsiStatus(ieiTS=0x9)
        packet = packet / j
    return packet
|
def env(client, paths, opt):
    """Renders a shell snippet based on paths in a Secretfile.

    For each secret path, prints ``NAME="value"`` lines (and ``export NAME``
    when requested), renewing dynamic AWS credentials as needed.
    """
    # Legacy --prefix behaviour only applies when none of the newer naming
    # options are in use.  (The dead ``old_prefix = False`` pre-assignment
    # was removed.)
    old_prefix = opt.prefix and not (opt.add_prefix or opt.add_suffix or not opt.merge_path)
    # BUG FIX: the two warning branches emitted the same (broken) message;
    # the implicit string concatenation lacked separating spaces, producing
    # "deprecated please use--no-merge-path".  Merged and fixed.
    if opt.prefix:
        LOG.warning("the prefix option is deprecated; please use "
                    "--no-merge-path --add-prefix $OLDPREFIX_ instead")
    key_map = cli_hash(opt.key_map)
    for path in paths:
        secrets = client.read(path)
        if not secrets or 'data' not in secrets:
            continue
        if is_aws(secrets['data']) and 'sts' not in path:
            renew_secret(client, secrets, opt)
        for s_key, s_val in secrets['data'].items():
            o_key = key_map.get(s_key, s_key)
            # see https://github.com/Autodesk/aomi/issues/40
            if old_prefix:
                env_name = ("%s_%s" % (opt.prefix, o_key)).upper()
            else:
                env_name = secret_key_name(path, o_key, opt).upper()
            print("%s=\"%s\"" % (env_name, s_val))
            if opt.export:
                print("export %s" % env_name)
|
def flush(self, using=None, **kwargs):
    """Performs a flush operation on the index.

    Any additional keyword arguments will be passed to
    ``Elasticsearch.indices.flush`` unchanged.
    """
    connection = self._get_connection(using)
    return connection.indices.flush(index=self._name, **kwargs)
|
def VxLANTunnelState_originator_switch_info_switchIpV6Address(self, **kwargs):
    """Auto Generated Code

    Builds the config XML tree for the originator switch IPv6 address and
    hands it to the callback (kwargs 'callback' overrides self._callback).
    """
    config = ET.Element("config")
    tunnel_state = ET.SubElement(
        config, "VxLANTunnelState",
        xmlns="http://brocade.com/ns/brocade-notification-stream")
    switch_info = ET.SubElement(tunnel_state, "originator-switch-info")
    address_node = ET.SubElement(switch_info, "switchIpV6Address")
    address_node.text = kwargs.pop('switchIpV6Address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def progress(status_code):
    """Translate PROGRESS status codes from GnuPG to messages.

    :param status_code: the status token reported by GnuPG.
    :returns: a human-readable description, or ``None`` for unknown codes.
    """
    lookup = {
        'pk_dsa': 'DSA key generation',
        'pk_elg': 'Elgamal key generation',
        'primegen': 'Prime generation',
        'need_entropy': 'Waiting for new entropy in the RNG',
        'tick': 'Generic tick without any special meaning - still working.',
        'starting_agent': 'A gpg-agent was started.',
        'learncard': 'gpg-agent or gpgsm is learning the smartcard data.',
        'card_busy': 'A smartcard is still working.',
    }
    # dict.get replaces the manual loop over keys (same result: the
    # mapped message, or None when the code is unknown).
    return lookup.get(str(status_code))
|
def update_records(self, records, scope_identifier, hub_name, plan_id, timeline_id):
    """UpdateRecords.

    :param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_0.task.models.VssJsonCollectionWrapper>` records:
    :param str scope_identifier: The project GUID to scope the request
    :param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
    :param str plan_id:
    :param str timeline_id:
    :rtype: [TimelineRecord]
    """
    route_values = {}
    # Serialize each URL parameter that was supplied.
    url_params = (
        ('scopeIdentifier', 'scope_identifier', scope_identifier),
        ('hubName', 'hub_name', hub_name),
        ('planId', 'plan_id', plan_id),
        ('timelineId', 'timeline_id', timeline_id),
    )
    for route_key, param_name, param_value in url_params:
        if param_value is not None:
            route_values[route_key] = self._serialize.url(param_name, param_value, 'str')
    content = self._serialize.body(records, 'VssJsonCollectionWrapper')
    response = self._send(
        http_method='PATCH',
        location_id='8893bc5b-35b2-4be7-83cb-99e683551db4',
        version='5.0',
        route_values=route_values,
        content=content)
    return self._deserialize('[TimelineRecord]', self._unwrap_collection(response))
|
def close(self):
    """Closes the lvm handle.  Usually you would never need to use this
    method unless you are trying to do operations using the ctypes
    function wrappers in conversion.py

    *Raises:*

    * HandleError
    """
    if self.handle:
        # lvm_quit returns non-zero on failure.
        q = lvm_quit(self.handle)
        if q != 0:
            raise HandleError("Failed to close LVM handle.")
        # NOTE(review): this assigns the name-mangled private attribute;
        # presumably ``handle`` is a property backed by ``__handle`` in the
        # enclosing class — confirm against the class definition.
        self.__handle = None
|
def bump(self, kind=None, prerelease=None, inplace=True):
    '''Increment the version and/or pre-release value

    Parameters
    ----------
    kind : str
        Increment the version. Can be ``major``, ``minor``, or ``patch``,
        corresponding to the three segments of the version number (in
        order). A value of ``None`` will not increment the version number
        (default).
    prerelease : str
        Increment the version's pre-release value. Can be ``alpha`` or
        ``beta``. A prerelease value of ``None`` will remove a pre-release
        value if it exists (default).
    inplace : bool
        If false, returns a new ``BumpableVersion`` instance. If ``True``
        (default), bumps the version in place.

    Examples
    --------
    The ``kind`` argument increments the version::

        >>> v = BumpableVersion('1.0.1')
        >>> v.bump('patch')
        BumpableVersion('1.0.2')
        >>> v.bump('minor')
        BumpableVersion('1.1')
        >>> v.bump('major')
        BumpableVersion('2.0')

    The prerelease argument increments the pre-release value. If ``kind``
    is not supplied simultaneously the version is bumped with a patch
    before entering pre-release::

        >>> v = BumpableVersion('1.0.0')
        >>> v.bump(prerelease='alpha')
        BumpableVersion('1.0.1a1')
        >>> v.bump(prerelease='beta')
        BumpableVersion('1.0.1b1')
        >>> v.bump('minor')
        BumpableVersion('1.1')

    Releases cannot move from beta back to alpha without a new
    major/minor/patch bump, and unknown kinds or prerelease types raise
    ``ValueError``.
    '''
    if kind is not None:
        # if already in pre-release and we want to move to pre-release,
        # increment version + prerelease
        if self.prerelease and prerelease:
            new_prerelease = self._increment_prerelease(None, prerelease)
            new_version = self._increment_version(self.version, kind)
        # if already in pre-release and we want to exit pre-release,
        # remove prerelease
        elif self.prerelease:
            new_prerelease = None
            # A pre-release of X.Y (patch == 0) finalizes to X.Y on a
            # 'minor' bump; a pre-release of X.Y.Z finalizes on 'patch'.
            # Any other kind really increments the version.
            if self.version[2] == 0:
                if kind == 'minor':
                    new_version = self.version
                else:
                    new_version = self._increment_version(self.version, kind)
            else:
                if kind == 'patch':
                    new_version = self.version
                else:
                    new_version = self._increment_version(self.version, kind)
        # not currently in pre-release: bump the version and (possibly)
        # enter pre-release.
        else:
            new_prerelease = self._increment_prerelease(None, prerelease)
            new_version = self._increment_version(self.version, kind)
    elif prerelease is not None:
        if self.prerelease:
            # Already in pre-release: advance the pre-release counter only.
            new_prerelease = self._increment_prerelease(self.prerelease, prerelease)
            new_version = self.version
        else:
            # Entering pre-release without a kind implies a patch bump first.
            new_prerelease = self._increment_prerelease(None, prerelease)
            new_version = self._increment_version(self.version, 'patch')
    else:  # default is bump patch
        new_prerelease = None
        new_version = self._increment_version(self.version, 'patch')
    if inplace:
        self.version = new_version
        self.prerelease = new_prerelease
    else:
        new = BumpableVersion()
        new.version = new_version
        new.prerelease = new_prerelease
        return new
|
def get_corpus_reader(corpus_name: str = None, language: str = None) -> CorpusReader:
    """Corpus reader factory method.

    :param corpus_name: the name of the supported corpus, available as: [package].SUPPORTED_CORPORA
    :param language: the language to search in
    :return: NLTK compatible corpus reader
    :raises ValueError: if the corpus data is not installed, or the corpus is
        not supported for the given language
    """
    BASE = '~/cltk_data/{}/text'.format(language)
    root = os.path.join(os.path.expanduser(BASE), corpus_name)
    # ``get(language, [])`` avoids a TypeError (``corpus_name in None``) for an
    # unknown language, producing the intended ValueError below instead.
    if not os.path.exists(root) or corpus_name not in SUPPORTED_CORPORA.get(language, []):
        raise ValueError(
            'Specified corpus data not found, please install {} for language: {}'.format(corpus_name, language))
    sentence_tokenizer = TokenizeSentence(language)
    the_word_tokenizer = WordTokenizer(language)
    #: Generic file ending, override below in your own CorpusReader implementation
    doc_pattern = r'.*\.txt'
    if language == 'latin':
        if corpus_name == 'latin_text_latin_library':
            skip_keywords = ['Latin', 'Library']
            return FilteredPlaintextCorpusReader(root=root, fileids=doc_pattern,
                                                 sent_tokenizer=sentence_tokenizer,
                                                 word_tokenizer=the_word_tokenizer,
                                                 skip_keywords=skip_keywords)
        if corpus_name == 'latin_text_perseus':
            # perseus also contains English; we only support this subsection
            valid_json_root = os.path.join(root, 'cltk_json')
            return JsonfileCorpusReader(root=valid_json_root,
                                        sent_tokenizer=sentence_tokenizer,
                                        word_tokenizer=the_word_tokenizer,
                                        target_language='latin')
    if language == 'greek':
        if corpus_name == 'greek_text_perseus':
            # we only support this subsection; 'grc' abbreviation is required
            valid_json_root = os.path.join(root, 'cltk_json')
            return JsonfileCorpusReader(root=valid_json_root,
                                        sent_tokenizer=sentence_tokenizer,
                                        word_tokenizer=the_word_tokenizer,
                                        target_language='grc')
        if corpus_name == 'greek_text_tesserae':
            # TODO(review): tokenizers/taggers should be replaced with CLTK
            # versions, most obviously for POS tagging
            return TesseraeCorpusReader(root=root, fileids=r'.*\.tess',
                                        sent_tokenizer=sent_tokenize,
                                        word_tokenizer=word_tokenize,
                                        pos_tagger=pos_tag,
                                        target_language='grc')
|
def upload(self, file_descriptor, settings):
    """Upload a file to the cloud.

    :param file_descriptor: an open file descriptor holding the payload
    :param settings: upload settings, serialized to JSON for the API call
    :return: the decoded JSON body of the media-upload response
    """
    files = {'file': file_descriptor}
    params = {"settings": json.dumps(settings)}
    response = self.__app.native_api_call(
        'media', 'upload', params, self.__options, True, files, False,
        http_path="/api/meta/v1/", http_method='POST',
        connect_timeout_sec=60 * 10)
    return json.loads(response.text)
|
def loads(cls, pickle_string):
    """Equivalent to ``pickle.loads`` except that the HoloViews trees are
    restored appropriately.
    """
    # Offset ids while unpickling so restored options don't collide with
    # ids already registered in the store; cleared again once done.
    cls.load_counter_offset = StoreOptions.id_offset()
    unpickled = pickle.loads(pickle_string)
    cls.load_counter_offset = None
    return unpickled
|
def neg_loglik(self, beta):
    """Creates the negative log likelihood of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    The negative loglikelihood of the model
    """
    # _model appears to run a filtering recursion returning (among others)
    # prediction covariances F and prediction errors v -- TODO confirm
    # against the _model implementation.
    _, _, _, F, v = self._model(self.data, beta)
    loglik = 0.0
    for i in range(0, self.data.shape[0]):
        # Gaussian log-density contribution per observation:
        # log|F_i| + v_i' F_i^{-1} v_i (pinv presumably guards against
        # near-singular F -- verify)
        loglik += np.linalg.slogdet(F[:, :, i])[1] + np.dot(v[i], np.dot(np.linalg.pinv(F[:, :, i]), v[i]))
    # NOTE(review): ``loglik.T[0]`` implies the accumulated terms are arrays
    # rather than scalars -- confirm the shape returned by np.dot above.
    return -(-((self.data.shape[0] / 2) * np.log(2 * np.pi)) - 0.5 * loglik.T[0].sum())
|
def md_to_pdf(input_name, output_name):
    """Converts an input MarkDown file to a PDF of the given output name.

    Parameters
    ----------
    input_name : String
        Relative file location of the input file to where this function is being called.
    output_name : String
        Relative file location of the output file to where this function is being called.
        Note that .pdf can be omitted.

    Examples
    --------
    Suppose we have a directory as follows:

        data/
            doc.md

    To convert the document:

    >>> from aide_document import convert
    >>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')

    .pdf can also be omitted from the second argument.
    """
    import subprocess
    # Ensure the output file always carries the .pdf extension.
    if not output_name.endswith('.pdf'):
        output_name += '.pdf'
    # Pass arguments as a list (shell=False) so file names containing
    # spaces or shell metacharacters are handled safely, unlike the
    # previous string-concatenated os.system() invocation.
    subprocess.run(['pandoc', input_name, '-o', output_name])
|
def initialize_state(self):
    """Call this to initialize the state of the UI after everything has been connected."""
    # Wire hardware-source events through to our private handlers, keeping
    # references to the listeners so they stay alive.
    if self.__hardware_source:
        self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
        self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
    # Push the initial display name and button state to any registered callbacks.
    if self.on_display_name_changed:
        self.on_display_name_changed(self.display_name)
    self.__update_buttons()
    # Start with an empty state list; real updates arrive via the listener above.
    if self.on_data_item_states_changed:
        self.on_data_item_states_changed(list())
|
def list_cidr_ips(cidr):
    '''
    Get a list of IP addresses from a CIDR.

    CLI example::

        salt myminion netaddress.list_cidr_ips 192.168.0.0/20
    '''
    ips = netaddr.IPNetwork(cidr)
    # Iterate the IPNetwork directly; wrapping it in list() first would
    # materialize a second, throwaway list of IPAddress objects.
    return [six.text_type(ip) for ip in ips]
|
def _logjacobian ( self ) :
"""Calculates the logjacobian of the current parameters ."""
|
if self . sampling_transforms is None :
logj = 0.
else :
logj = self . sampling_transforms . logjacobian ( ** self . current_params )
return logj
|
def scene_name(sequence_number, scene_id, name):
    """Create a scene.name message.

    :param sequence_number: 64-bit message sequence counter
    :param scene_id: 32-bit identifier of the scene being named
    :param name: the scene name to carry in the message
    :return: the serialized message payload
    """
    builder = MessageWriter().string("scene.name")
    builder = builder.uint64(sequence_number).uint32(scene_id)
    return builder.string(name).get()
|
def _add_sensor ( self , sensor ) :
"""Add a new sensor to the tree .
Parameters
sensor : : class : ` katcp . Sensor ` object
New sensor to add to the tree ."""
|
self . _parent_to_children [ sensor ] = set ( )
self . _child_to_parents [ sensor ] = set ( )
|
def _update_alignment(self, alignment):
    """Updates the vertical text alignment toggle button.

    Parameters
    ----------
    alignment : String in ["top", "middle", "bottom"]
        Vertical alignment state to show on the button.
    """
    # Map alignment names to the toggle button's internal state codes.
    states = {"top": 2, "middle": 0, "bottom": 1}
    self.alignment_tb.state = states[alignment]
    # toggle(None) presumably forces the button to pick up the new state
    # before Refresh() repaints it -- confirm against the button class.
    self.alignment_tb.toggle(None)
    self.alignment_tb.Refresh()
|
def send_heartbeat(self):
    """Send a heartbeat frame carrying the last seen sequence number."""
    sequence = self.t.sequence
    self.logger.debug("heartbeat " + str(sequence))
    payload = {'op': self.t.HEARTBEAT, 'd': sequence}
    self.t.ws.send(json.dumps(payload))
|
def _maybe_start_instance ( instance ) :
"""Starts instance if it ' s stopped , no - op otherwise ."""
|
if not instance :
return
if instance . state [ 'Name' ] == 'stopped' :
instance . start ( )
while True :
print ( f"Waiting for {instance} to start." )
instance . reload ( )
if instance . state [ 'Name' ] == 'running' :
break
time . sleep ( 10 )
|
def run(self, **options):
    """Override runserver's entry point to bring Gunicorn on.

    A large portion of code in this method is copied from
    `django.core.management.commands.runserver`.
    """
    shutdown_message = options.get('shutdown_message', '')
    self.stdout.write("Performing system checks...\n\n")
    self.check(display_num_errors=True)
    self.check_migrations()
    now = datetime.datetime.now().strftime(r'%B %d, %Y - %X')
    if six.PY2:
        # strftime returns bytes on Python 2; decode before writing
        now = now.decode(get_system_encoding())
    self.stdout.write(now)
    addr, port = self.addr, self.port
    # Wrap bare IPv6 addresses in brackets
    addr = '[{}]'.format(addr) if self._raw_ipv6 else addr
    runner = GunicornRunner(addr, port, options)
    try:
        runner.run()
    except KeyboardInterrupt:
        # Ctrl-C: shut down gracefully and exit with status 0
        runner.shutdown()
        if shutdown_message:
            self.stdout.write(shutdown_message)
        sys.exit(0)
    except:
        # Bare except so Gunicorn is stopped even on SystemExit before
        # the exception propagates.
        runner.shutdown()
        raise
|
def read_unicode(path, encoding, encoding_errors):
    """Return the contents of a file as a unicode string.

    :param path: path of the file to read
    :param encoding: character encoding used to decode the raw bytes
    :param encoding_errors: error-handling scheme passed to the decoder
    :return: the decoded file contents
    """
    # The previous try/finally referenced ``f`` in the finally clause even
    # when open() itself failed, turning the original error into a
    # NameError; a context manager closes the file correctly in all cases.
    with open(path, 'rb') as f:
        return make_unicode(f.read(), encoding, encoding_errors)
|
def metric_get(self, project, metric_name):
    """API call: retrieve a metric resource.

    :type project: str
    :param project: ID of the project containing the metric.

    :type metric_name: str
    :param metric_name: the name of the metric

    :rtype: dict
    :returns: The metric object returned from the API (converted from a
              protobuf to a dictionary).
    """
    metric_path = "projects/%s/metrics/%s" % (project, metric_name)
    metric_pb = self._gapic_api.get_log_metric(metric_path)
    # LogMetric contains no ``Any`` fields, so MessageToDict is safe here.
    return MessageToDict(metric_pb)
|
def mean(attrs, inputs, proto_obj):
    """Mean of all the input tensors."""
    # Stack the inputs along a new leading axis, then average over it.
    expanded = [symbol.expand_dims(tensor, axis=0) for tensor in inputs]
    stacked = symbol.concat(*expanded, dim=0)
    return symbol.mean(stacked, axis=0), attrs, inputs
|
def signed_raw(self) -> str:
    """Return the revocation document as a signed raw string.

    :raises MalformedDocumentError: if the document was created from an
        inline format and therefore lacks a full Identity
    :return: the raw document followed by its signatures
    """
    if not isinstance(self.identity, Identity):
        raise MalformedDocumentError("Can not return full revocation document created from inline")
    signatures_block = "\n".join(self.signatures)
    return self.raw() + signatures_block + "\n"
|
def when(self, case_expr, result_expr):
    """Add a new case-result pair.

    Parameters
    ----------
    case : Expr
        Expression to equality-compare with base expression. Must be
        comparable with the base.
    result : Expr
        Value when the case predicate evaluates to true.

    Returns
    -------
    builder : CaseBuilder
    """
    case_expr = ir.as_value_expr(case_expr)
    result_expr = ir.as_value_expr(result_expr)
    if not rlz.comparable(self.base, case_expr):
        raise TypeError('Base expression and passed case are not comparable')
    # Maintain immutability: build fresh lists and return a new builder.
    return type(self)(self.base,
                      [*self.cases, case_expr],
                      [*self.results, result_expr],
                      self.default)
|
def build_bsub_command(command_template, lsf_args):
    """Build and return an LSF batch command template.

    The structure is ``bsub -o {logfile} -<key> <value> ... <command_template>``
    where ``<key>`` and ``<value>`` refer to items in *lsf_args*; a ``None``
    value emits the flag with no argument. Returns ``""`` when there is no
    command template.
    """
    if command_template is None:
        return ""
    pieces = ['bsub -o {logfile}']
    for flag, argument in lsf_args.items():
        pieces.append('-%s' % flag)
        if argument is not None:
            pieces.append('%s' % argument)
    pieces.append('%s' % command_template)
    return ' '.join(pieces)
|
def bgread(stream, blockSizeLimit=65535, pollTime=.03, closeStream=True):
    '''
    bgread - Start a thread that reads the given stream in a non-blocking
    fashion, automatically populating data in the returned object.

    @param stream <object> - A stream on which to read. Socket, file, etc.

    @param blockSizeLimit <None/int> - Maximum number of bytes per read
      (default 65535). If None, the stream is read until no more data is
      currently available; this value effectively trades CPU time spent on
      this stream's I/O against the rest of your application.

    @param pollTime <float> - Seconds to wait after all available data has
      been read before checking again (default .03). Lower values devote
      more cycles to collecting background data.

    @param closeStream <bool> - Default True. If True, "close" is called on
      the stream object once the other side has closed and all data has
      been read.

    NOTES -- blockSizeLimit/pollTime is the effective maximum throughput;
      real throughput is lower by the device read time. The defaults allow
      reading up to roughly 2MB per second.

    @return - A BackgroundReadData object. Its "blocks" attribute lists the
      non-zero-length blocks read so far; "data" joins them into a single
      str/bytes (depending on stream mode); "isFinished" becomes True when
      the stream has been closed; "error" records any exception that
      terminated the reader thread. @see BackgroundReadData for more info.
    '''
    try:
        pollTime = float(pollTime)
    except ValueError:
        raise ValueError('Provided poll time must be a float.')
    if not (hasattr(stream, 'read') or hasattr(stream, 'recv')):
        raise ValueError('Cannot read off provided stream, does not implement "read" or "recv"')
    if blockSizeLimit is not None:
        try:
            blockSizeLimit = int(blockSizeLimit)
            if blockSizeLimit <= 0:
                raise ValueError()
        except ValueError:
            raise ValueError('Provided block size limit must be "None" for no limit, or a positive integer.')
    results = BackgroundReadData(detect_stream_mode(stream))
    reader = threading.Thread(target=_do_bgread,
                              args=(stream, blockSizeLimit, pollTime, closeStream, results))
    reader.daemon = True  # don't keep the interpreter alive for this thread
    reader.start()
    return results
|
def apply_correlation(self, sites, imt, residuals, stddev_intra=0):
    """Apply correlation to randomly sampled residuals.

    :param sites:
        :class:`~openquake.hazardlib.site.SiteCollection` residuals were
        sampled for.
    :param imt:
        Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
    :param residuals:
        2d numpy array of sampled residuals, where first dimension
        represents sites (the length as ``sites`` parameter) and
        second one represents different realizations (samples).
    :param stddev_intra:
        Intra-event standard deviation array. Note that different sites do
        not necessarily have the same intra-event standard deviation.
    :returns:
        Array of the same structure and semantics as ``residuals``
        but with correlations applied.

    NB: the correlation matrix is cached. It is computed only once
    per IMT for the complete site collection and then the portion
    corresponding to the sites is multiplied by the residuals.
    """
    # intra-event residual for a single realization is a product
    # of lower-triangle decomposed correlation matrix and vector
    # of N random numbers (where N is equal to number of sites).
    # we need to do that multiplication once per realization
    # with the same matrix and different vectors.
    try:
        corma = self.cache[imt]
    except KeyError:
        # computed once per IMT for the *complete* site collection; the
        # relevant entries are selected below for partial collections
        corma = self.get_lower_triangle_correlation_matrix(sites.complete, imt)
        self.cache[imt] = corma
    if len(sites.complete) == len(sites):
        return numpy.dot(corma, residuals)
    # it is important to allocate little memory, this is why I am
    # accumulating below; if S is the length of the complete sites
    # the correlation matrix has shape (S, S) and the residuals (N, s),
    # where s is the number of samples
    # NOTE(review): passing a generator to numpy.sum falls back to the
    # builtin-sum semantics and is deprecated in recent numpy versions --
    # confirm against the supported numpy release.
    return numpy.sum(corma[sites.sids, sid] * res for sid, res in zip(sites.sids, residuals))
|
def reset(self):
    """Remove all annotations from window."""
    self.idx_annotations.setText('Load Annotation File...')
    self.idx_rater.setText('')
    self.annot = None
    self.dataset_markers = None
    # remove dataset marker
    self.idx_marker.clearContents()
    self.idx_marker.setRowCount(0)
    # remove summary statistics: takeAt(1) twice because the layout
    # indices shift after the first item is removed
    w1 = self.idx_summary.takeAt(1).widget()
    w2 = self.idx_summary.takeAt(1).widget()
    self.idx_summary.removeWidget(w1)
    self.idx_summary.removeWidget(w2)
    # deleteLater defers destruction until the Qt event loop is idle
    w1.deleteLater()
    w2.deleteLater()
    # replace the removed widgets with fresh, empty group boxes
    b1 = QGroupBox('Staging')
    b2 = QGroupBox('Signal quality')
    self.idx_summary.addWidget(b1)
    self.idx_summary.addWidget(b2)
    # remove annotations and rebuild the dependent UI
    self.display_eventtype()
    self.update_annotations()
    self.parent.create_menubar()
|
def settle(self, channel_identifier: ChannelID, transferred_amount: TokenAmount, locked_amount: TokenAmount, locksroot: Locksroot, partner: Address, partner_transferred_amount: TokenAmount, partner_locked_amount: TokenAmount, partner_locksroot: Locksroot, given_block_identifier: BlockSpecification, ):
    """Settle the channel.

    Orders the two participants as required by the settleChannel contract
    call, estimates gas, sends the transaction and raises
    RaidenUnrecoverableError if the transaction failed or could not be
    sent.
    """
    log_details = {'channel_identifier': channel_identifier, 'token_network': pex(self.address), 'node': pex(self.node_address), 'partner': pex(partner), 'transferred_amount': transferred_amount, 'locked_amount': locked_amount, 'locksroot': encode_hex(locksroot), 'partner_transferred_amount': partner_transferred_amount, 'partner_locked_amount': partner_locked_amount, 'partner_locksroot': encode_hex(partner_locksroot), }
    log.debug('settle called', **log_details)
    checking_block = self.client.get_checking_block()
    # and now find out which side has the larger total (transferred +
    # locked) amount, since settleChannel expects participant2 to be the
    # one with the higher maximum
    our_maximum = transferred_amount + locked_amount
    partner_maximum = partner_transferred_amount + partner_locked_amount
    # The second participant transferred + locked amount must be higher
    our_bp_is_larger = our_maximum > partner_maximum
    if our_bp_is_larger:
        kwargs = {'participant1': partner, 'participant1_transferred_amount': partner_transferred_amount, 'participant1_locked_amount': partner_locked_amount, 'participant1_locksroot': partner_locksroot, 'participant2': self.node_address, 'participant2_transferred_amount': transferred_amount, 'participant2_locked_amount': locked_amount, 'participant2_locksroot': locksroot, }
    else:
        kwargs = {'participant1': self.node_address, 'participant1_transferred_amount': transferred_amount, 'participant1_locked_amount': locked_amount, 'participant1_locksroot': locksroot, 'participant2': partner, 'participant2_transferred_amount': partner_transferred_amount, 'participant2_locked_amount': partner_locked_amount, 'participant2_locksroot': partner_locksroot, }
    try:
        self._settle_preconditions(channel_identifier=channel_identifier, partner=partner, block_identifier=given_block_identifier, )
    except NoStateForBlockIdentifier:
        # If preconditions end up being on pruned state skip them. Estimate
        # gas will stop us from sending a transaction that will fail
        pass
    with self.channel_operations_lock[partner]:
        error_prefix = 'Call to settle will fail'
        gas_limit = self.proxy.estimate_gas(checking_block, 'settleChannel', channel_identifier=channel_identifier, **kwargs, )
        if gas_limit:
            error_prefix = 'settle call failed'
            gas_limit = safe_gas_limit(gas_limit, GAS_REQUIRED_FOR_SETTLE_CHANNEL)
            transaction_hash = self.proxy.transact('settleChannel', gas_limit, channel_identifier=channel_identifier, **kwargs, )
            self.client.poll(transaction_hash)
            receipt_or_none = check_transaction_threw(self.client, transaction_hash)
        # gas_limit is None when gas estimation failed, i.e. the
        # transaction was never sent
        transaction_executed = gas_limit is not None
        if not transaction_executed or receipt_or_none:
            if transaction_executed:
                block = receipt_or_none['blockNumber']
            else:
                block = checking_block
            # Distinguish between "not enough ETH" and an on-chain failure
            # before surfacing the channel-state diagnosis to the caller.
            self.proxy.jsonrpc_client.check_for_insufficient_eth(transaction_name='settleChannel', transaction_executed=transaction_executed, required_gas=GAS_REQUIRED_FOR_SETTLE_CHANNEL, block_identifier=block, )
            msg = self._check_channel_state_after_settle(participant1=self.node_address, participant2=partner, block_identifier=block, channel_identifier=channel_identifier, )
            error_msg = f'{error_prefix}. {msg}'
            log.critical(error_msg, **log_details)
            raise RaidenUnrecoverableError(error_msg)
        log.info('settle successful', **log_details)
|
def predicates(G: Graph, n: Node) -> Set[TriplePredicate]:
    """predicates(G, n) is the set of predicates in neigh(G, n).

    predicates(G, n) = predicatesOut(G, n) ∪ predicatesIn(G, n)
    """
    outgoing = predicatesOut(G, n)
    incoming = predicatesIn(G, n)
    return outgoing.union(incoming)
|
def domains(request):
    """A page with number of services and layers faceted on domains."""
    url = ''
    # Facet query: pivot on (domain_name, service_id), unlimited facets,
    # JSON output, no document rows.
    query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
    if settings.SEARCH_TYPE == 'elasticsearch':
        url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
    if settings.SEARCH_TYPE == 'solr':
        url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
    LOGGER.debug(url)
    # NOTE(review): urllib2 is Python 2 only -- confirm this module still
    # targets py2.
    response = urllib2.urlopen(url)
    # strip newlines so the raw JSON can be embedded in the template
    data = response.read().replace('\n', '')
    # stats
    layers_count = Layer.objects.all().count()
    services_count = Service.objects.all().count()
    template = loader.get_template('aggregator/index.html')
    context = RequestContext(request, {'data': data, 'layers_count': layers_count, 'services_count': services_count, })
    return HttpResponse(template.render(context))
|
def waitForCompletion(self):
    """Wait for all worker threads to complete their work.

    Each worker quits when it pulls the ``(None, None)`` sentinel off the
    task queue, so one sentinel is enqueued per thread before joining
    them all.
    """
    sentinel = (None, None)
    for _ in range(self.numberOfThreads):
        self.taskQueue.put(sentinel)
    for worker in self.threadList:
        worker.join()
|
def get_lr_scheduler(scheduler_type: str, updates_per_checkpoint: int, learning_rate_half_life: int, learning_rate_reduce_factor: float, learning_rate_reduce_num_not_improved: int, learning_rate_schedule: Optional[List[Tuple[float, int]]] = None, learning_rate_warmup: Optional[int] = 0) -> Optional[LearningRateScheduler]:
    """Returns a learning rate scheduler.

    :param scheduler_type: Scheduler type.
    :param updates_per_checkpoint: Number of batches between checkpoints.
    :param learning_rate_half_life: Half life of the learning rate in number of checkpoints.
    :param learning_rate_reduce_factor: Factor to reduce learning rate with.
    :param learning_rate_reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate
           is reduced.
    :param learning_rate_schedule: Optional fixed learning rate schedule.
    :param learning_rate_warmup: Number of batches that the learning rate is linearly increased.
    :raises: ValueError if unknown scheduler_type
    :return: Learning rate scheduler.
    """
    check_condition(learning_rate_schedule is None or scheduler_type == C.LR_SCHEDULER_FIXED_STEP,
                    "Learning rate schedule can only be used with '%s' learning rate scheduler." % C.LR_SCHEDULER_FIXED_STEP)
    if scheduler_type is None:
        return None
    if scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T:
        return LearningRateSchedulerInvSqrtT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup)
    if scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_T:
        return LearningRateSchedulerInvT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup)
    if scheduler_type == C.LR_SCHEDULER_FIXED_STEP:
        check_condition(learning_rate_schedule is not None,
                        "learning_rate_schedule needed for %s scheduler" % C.LR_SCHEDULER_FIXED_STEP)
        return LearningRateSchedulerFixedStep(learning_rate_schedule, updates_per_checkpoint)
    if scheduler_type == C.LR_SCHEDULER_PLATEAU_REDUCE:
        check_condition(learning_rate_reduce_factor is not None,
                        "learning_rate_reduce_factor needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE)
        check_condition(learning_rate_reduce_num_not_improved is not None,
                        "learning_rate_reduce_num_not_improved needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE)
        if learning_rate_reduce_factor >= 1.0:
            # A factor of 1.0 would never reduce the rate; scheduling is a no-op.
            logger.warning("Not using %s learning rate scheduling: learning_rate_reduce_factor == 1.0" % C.LR_SCHEDULER_PLATEAU_REDUCE)
            return None
        return LearningRateSchedulerPlateauReduce(learning_rate_reduce_factor, learning_rate_reduce_num_not_improved, learning_rate_warmup)
    raise ValueError("Unknown learning rate scheduler type %s." % scheduler_type)
|
def get_nested_dicts_with_key_containing_value(parent_dict: dict, key, value):
    """Return all nested dictionaries whose *key* entry contains *value*.

    A sub-case of NestedLookup; note the result is a generator, not a list.
    """
    matches = []
    predicate = NestedLookup.key_value_containing_value_factory(key, value)
    NestedLookup(parent_dict, matches, predicate)
    return (document for document, _ in matches)
|
def value(self, value):
    """set the value"""
    # for the indep direction we also allow a string which points to one
    # of the other available dimensions
    # TODO: support c, fc, ec?
    if isinstance(value, common.basestring) and value in ['x', 'y', 'z']:
        # we'll cast just to get rid of any python2 unicodes
        self._value = str(value)
        dimension = value
        # mirror the unit of the referenced dimension on this object
        self._unit = getattr(self.call, dimension).unit
        return
    # NOTE: cannot do super on setter directly, see this python
    # bug: https://bugs.python.org/issue14965 and discussion:
    # https://mail.python.org/pipermail/python-dev/2010-April/099672.html
    super(CallDimensionI, self)._set_value(value)
|
def smsc(self, smscNumber):
    """Set the default SMSC number to use when sending SMS messages."""
    if smscNumber == self._smscNumber:
        return  # unchanged; skip the AT command
    if self.alive:
        # Push the new SMSC to the modem immediately when connected.
        self.write('AT+CSCA="{0}"'.format(smscNumber))
    self._smscNumber = smscNumber
|
def get_bounds(self):
    """Extracts the bounds of all the inputs of the domain of the *model*."""
    all_bounds = []
    for variable in self.space_expanded:
        all_bounds.extend(variable.get_bounds())
    return all_bounds
|
def _strip_commas ( cls , kw ) :
"Strip out any leading / training commas from the token"
|
kw = kw [ : - 1 ] if kw [ - 1 ] == ',' else kw
return kw [ 1 : ] if kw [ 0 ] == ',' else kw
|
def rouwenhorst(rho, sigma, N):
    """Approximate an AR1 process by a finite markov chain using Rouwenhorst's method.

    :param rho: autocorrelation of the AR1 process
    :param sigma: conditional standard deviation of the AR1 process
    :param N: number of states
    :return [nodes, P]: equally spaced nodes and transition matrix
    """
    from numpy import sqrt, linspace, array, zeros

    sigma = float(sigma)
    # Degenerate chain: a single state with a certain self-transition.
    if N == 1:
        return [array([0.0]), array([[1.0]])]

    p = (rho + 1) / 2
    q = p
    # Half-width of the grid, chosen from the process variance.
    nu = sqrt((N - 1) / (1 - rho ** 2)) * sigma
    nodes = linspace(-nu, nu, N)

    P = array([[p, 1 - p], [1 - q, q]])
    if N == 2:
        return [nodes, P]

    # Rouwenhorst recursion: grow the matrix one state at a time by
    # embedding the previous matrix into the four corners of a larger one.
    for size in range(3, N + 1):
        blank = zeros((size, size))
        top_left = blank.copy()
        top_right = blank.copy()
        bottom_left = blank.copy()
        bottom_right = blank.copy()
        top_left[:-1, :-1] = P
        top_right[:-1, 1:] = P
        bottom_left[1:, :-1] = P
        bottom_right[1:, 1:] = P
        P = p * top_left + (1 - p) * top_right + (1 - q) * bottom_left + q * bottom_right
        # Interior rows received two overlapping contributions; halve them.
        P[1:-1, :] = P[1:-1, :] / 2
    return [nodes, P]
|
def send_reset_password_instructions(user):  # type: (User) -> None
    """Send the reset password instructions email for the specified user.

    :param user: The user to send the instructions to
    """
    token = generate_reset_password_token(user)
    # Build an absolute link: url_root ends with '/', the route starts with one.
    reset_link = request.url_root[:-1] + url_for("login.reset_password", token=token)
    subject = _("Password reset instruction for {site_name}").format(site_name=current_app.config.get("SITE_NAME"))
    send_mail(subject, user.email, "password_reset_instructions",
              user=user, reset_link=reset_link)
|
def get_feature_state(self, feature_id, user_scope):
    """GetFeatureState.

    [Preview API] Get the state of the specified feature for the given user/all-users scope.

    :param str feature_id: Contribution id of the feature
    :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
    :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>`
    """
    route_values = {}
    # Serialize only the route parameters that were actually provided.
    for route_key, param_name, param_value in (('featureId', 'feature_id', feature_id),
                                               ('userScope', 'user_scope', user_scope)):
        if param_value is not None:
            route_values[route_key] = self._serialize.url(param_name, param_value, 'str')
    response = self._send(http_method='GET',
                          location_id='98911314-3f9b-4eaf-80e8-83900d8e85d9',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('ContributedFeatureState', response)
|
def create(self):
    """Create an instance of the Access Control Service with the typical
    starting settings.
    """
    self.service.create()
    # Expose connection details via environment variables for immediate use.
    uri = self._get_uri()
    predix.config.set_env_value(self.use_class, 'uri', uri)
    zone_id = self._get_zone_id()
    predix.config.set_env_value(self.use_class, 'zone_id', zone_id)
|
def _get_project_name(args):
    """Get project name.

    Uses the first positional argument when present; otherwise prompts
    the user until a non-empty name is entered.
    """
    name = args.get(0)
    puts("")
    # NOTE(review): raw_input is Python 2 only -- on Python 3 this raises
    # NameError; confirm the project still targets py2.
    while not name:
        name = raw_input("What is the project's short directory name? (e.g. my_project) ")
    return name
|
def read_lsm_eventlist(fh):
    """Read LSM events from file and return as list of (time, type, text)."""
    # The 8-byte header holds two uint32s; only the second (event count) is used.
    count = struct.unpack('<II', fh.read(8))[1]
    events = []
    for _ in range(count):
        # Each record starts with size (uint32), time (double), type (uint32).
        esize, etime, etype = struct.unpack('<IdI', fh.read(16))
        # The remainder of the record is the null-padded event text.
        etext = bytes2str(stripnull(fh.read(esize - 16)))
        events.append((etime, etype, etext))
    return events
|
def parse(self, instrs):
    """Parse a sequence of IR instruction strings.

    :param instrs: iterable of instruction strings
    :return: list of parsed (cloned) instructions; on a parse failure the
        error is logged and the instructions parsed so far are returned
    """
    instrs_reil = []
    try:
        for instr in instrs:
            instr_lower = instr.lower()
            # Parse-and-cache: each distinct instruction string is parsed
            # only once.
            if instr_lower not in self._cache:
                self._cache[instr_lower] = instruction.parseString(instr_lower)[0]
            # Clone the cached instruction so callers can mutate it freely.
            instrs_reil.append(copy.deepcopy(self._cache[instr_lower]))
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        error_msg = "Failed to parse instruction: %s"
        logger.error(error_msg, instr, exc_info=True)
    return instrs_reil
|
def resolve(self, executor, targets, classpath_products, confs=None, extra_args=None, invalidate_dependents=False):
    """Resolves external classpath products (typically jars) for the given targets.

    :API: public

    :param executor: A java executor to run ivy with.
    :type executor: :class:`pants.java.executor.Executor`
    :param targets: The targets to resolve jvm dependencies for.
    :type targets: :class:`collections.Iterable` of :class:`pants.build_graph.target.Target`
    :param classpath_products: The classpath products to populate with the results of the resolve.
    :type classpath_products: :class:`pants.backend.jvm.tasks.classpath_products.ClasspathProducts`
    :param confs: The ivy configurations to resolve; ('default',) by default.
    :type confs: :class:`collections.Iterable` of string
    :param extra_args: Any extra command line arguments to pass to ivy.
    :type extra_args: list of string
    :param bool invalidate_dependents: `True` to invalidate dependents of targets that needed
      to be resolved.
    :returns: The results of each of the resolves run by this call.
    :rtype: list of IvyResolveResult
    """
    confs = confs or ('default',)
    # Targets are partitioned by managed-dependency artifact set; each
    # partition is resolved independently with its pinned artifacts.
    grouped = JarDependencyManagement.global_instance().targets_by_artifact_set(targets)
    return [
        self._resolve_subset(executor,
                             target_subset,
                             classpath_products,
                             confs=confs,
                             extra_args=extra_args,
                             invalidate_dependents=invalidate_dependents,
                             pinned_artifacts=artifact_set)
        for artifact_set, target_subset in grouped.items()
    ]
|
def transition_matrix(C, reversible=False, mu=None, method='auto', **kwargs):
    r"""Estimate the transition matrix from the given count matrix.

    Parameters
    ----------
    C : numpy ndarray or scipy.sparse matrix
        Count matrix
    reversible : bool (optional)
        If True restrict the ensemble of transition matrices to those having
        a detailed balance symmetry, otherwise the likelihood optimization is
        carried out over the whole space of stochastic matrices.
    mu : array_like
        The stationary distribution of the MLE transition matrix.
    method : str
        Select which implementation to use for the estimation. One of 'auto',
        'dense' and 'sparse', optional, default='auto'. 'dense' always selects
        the dense implementation, 'sparse' always selects the sparse one.
        'auto' selects the most efficient implementation according to the
        sparsity structure of the matrix: if the occupation of the C matrix
        is less than one third, select sparse, else select dense. The type of
        the T matrix returned always matches the type of the C matrix,
        irrespective of the method that was used to compute it.
    **kwargs : optional algorithm-specific parameters
        Xinit : (M, M) ndarray
            Optional parameter with reversible=True. Initial value for the
            matrix of absolute transition probabilities. Unless set otherwise,
            will use X = diag(pi) T, where T is a nonreversible transition
            matrix estimated from C, i.e. T_ij = c_ij / sum_k c_ik, and pi is
            its stationary distribution.
        maxiter : int, default=100000
            Optional parameter with reversible=True. Maximum number of
            iterations before the method exits.
        maxerr : float, default=1e-8
            Optional parameter with reversible=True. Convergence tolerance for
            transition matrix estimation. This specifies the maximum change of
            the Euclidean norm of relative stationary probabilities
            (:math:`x_i = \sum_k x_{ik}`). The relative stationary probability
            changes :math:`e_i = (x_i^{(1)} - x_i^{(2)}) / (x_i^{(1)} + x_i^{(2)})`
            are used in order to track changes in small probabilities. The
            Euclidean norm of the change vector, :math:`|e_i|_2`, is compared
            to maxerr.
        rev_pisym : bool, default=False
            Fast computation of reversible transition matrix by normalizing
            :math:`x_{ij} = \pi_i p_{ij} + \pi_j p_{ji}`. :math:`p_{ij}` is
            the direct (nonreversible) estimate and :math:`\pi_i` is its
            stationary distribution. This estimator is asymptotically unbiased
            but not maximum likelihood.
        return_statdist : bool, default=False
            Optional parameter with reversible=True. If set to true, the
            stationary distribution is also returned.
        return_conv : bool, default=False
            Optional parameter with reversible=True. If set to true, the
            likelihood history and the pi_change history is returned.
        warn_not_converged : bool, default=True
            Prints a warning if not converged.
        sparse_newton : bool, default=False
            If True, use the experimental primal-dual interior-point solver
            for the sparse input/computation method.

    Returns
    -------
    P : (M, M) ndarray or scipy.sparse matrix
        The MLE transition matrix. P has the same data type (dense or sparse)
        as the input matrix C. The reversible estimator returns by default
        only P, but may also return (P, pi) or (P, lhist, pi_changes) or
        (P, pi, lhist, pi_changes) depending on the return settings:

        * P : ndarray (n, n) -- transition matrix; the only return for
          return_statdist=False, return_conv=False
        * (pi) : ndarray (n) -- stationary distribution; only returned if
          return_statdist=True
        * (lhist) : ndarray (k) -- likelihood history, with the length of the
          number of iterations needed; only returned if return_conv=True
        * (pi_changes) : ndarray (k) -- history of stationary distribution
          changes; only returned if return_conv=True

    Notes
    -----
    The transition matrix is a maximum likelihood estimate (MLE) of the
    probability distribution of transition matrices with parameters given by
    the count matrix.

    References
    ----------
    .. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
       Chodera, C Schuette and F Noe. 2011. Markov models of molecular
       kinetics: Generation and validation. J Chem Phys 134: 174105
    .. [2] Bowman, G R, K A Beauchamp, G Boxer and V S Pande. 2009. Progress
       and challenges in the automated construction of Markov state models
       for full protein systems. J. Chem. Phys. 131: 124101
    .. [3] Trendelkamp-Schroer, B, H Wu, F Paul and F Noe. 2015. Estimation
       and uncertainty of reversible Markov models. J. Chem. Phys. 143: 174101

    Examples
    --------
    >>> import numpy as np
    >>> from msmtools.estimation import transition_matrix
    >>> C = np.array([[10, 1, 1], [2, 0, 3], [0, 1, 4]])

    Non-reversible estimate

    >>> T_nrev = transition_matrix(C)

    Reversible estimate

    >>> T_rev = transition_matrix(C, reversible=True)

    Reversible estimate with given stationary vector

    >>> mu = np.array([0.7, 0.01, 0.29])
    >>> T_mu = transition_matrix(C, reversible=True, mu=mu)
    """
    if issparse(C):
        sparse_input_type = True
    elif isdense(C):
        sparse_input_type = False
    else:
        raise NotImplementedError('C has an unknown type.')

    if method == 'dense':
        sparse_computation = False
    elif method == 'sparse':
        sparse_computation = True
    elif method == 'auto':
        # Heuristic: use the sparse code path when the occupation of the C
        # matrix is less than one third.
        if sparse_input_type:
            dof = C.getnnz()
        else:
            dof = np.count_nonzero(C)
        dimension = C.shape[0]
        if dimension * dimension < 3 * dof:
            sparse_computation = False
        else:
            sparse_computation = True
    else:
        raise ValueError(('method="%s" is no valid choice. It should be one of'
                          '"dense", "sparse" or "auto".') % method)

    # Convert the input to the type required by the chosen computation.
    if sparse_computation and not sparse_input_type:
        C = coo_matrix(C)
    if not sparse_computation and sparse_input_type:
        C = C.toarray()

    # BUGFIX: this previously read ``'return_statdist' in kwargs``, so an
    # explicit ``return_statdist=False`` was treated as True and the function
    # returned a (T, None) tuple instead of T. Use the actual value instead.
    return_statdist = kwargs.get('return_statdist', False)
    kwargs['return_statdist'] = return_statdist
    sparse_newton = kwargs.pop('sparse_newton', False)

    if reversible:
        rev_pisym = kwargs.pop('rev_pisym', False)
        if mu is None:
            if sparse_computation:
                if rev_pisym:
                    result = sparse.transition_matrix.transition_matrix_reversible_pisym(C, **kwargs)
                elif sparse_newton:
                    from msmtools.estimation.sparse.newton.mle_rev import solve_mle_rev
                    result = solve_mle_rev(C, **kwargs)
                else:
                    result = sparse.mle_trev.mle_trev(C, **kwargs)
            else:
                if rev_pisym:
                    result = dense.transition_matrix.transition_matrix_reversible_pisym(C, **kwargs)
                else:
                    result = dense.mle_trev.mle_trev(C, **kwargs)
        else:
            # pi given; the fixed-pi estimators do not know this keyword.
            kwargs.pop('return_statdist')
            if sparse_computation:
                # Sparse, reversible, fixed pi (currently using dense with
                # sparse conversion).
                result = sparse.mle_trev_given_pi.mle_trev_given_pi(C, mu, **kwargs)
            else:
                result = dense.mle_trev_given_pi.mle_trev_given_pi(C, mu, **kwargs)
    else:  # nonreversible estimation
        if mu is None:
            if sparse_computation:  # Sparse, nonreversible
                result = sparse.transition_matrix.transition_matrix_non_reversible(C)
            else:  # Dense, nonreversible
                result = dense.transition_matrix.transition_matrix_non_reversible(C)
            # Both methods currently do not have an iterate of pi, so we
            # compute it here for consistency.
            if return_statdist:
                from msmtools.analysis import stationary_distribution
                mu = stationary_distribution(result)
        else:
            raise NotImplementedError('nonreversible mle with fixed stationary distribution not implemented.')

    if return_statdist and isinstance(result, tuple):
        T, mu = result
    else:
        T = result

    # Convert the result back to the input type.
    if sparse_computation and not sparse_input_type:
        T = T.toarray()
    elif not sparse_computation and sparse_input_type:
        T = csr_matrix(T)
    if return_statdist:
        return T, mu
    return T
|
def build_arch(self, arch):
    '''Run any build tasks for the Recipe. By default this dispatches to a
    ``build_<archname>`` method for the current architecture, if one is
    defined, and does nothing otherwise.'''
    handler = getattr(self, "build_{}".format(arch.arch), None)
    if handler is not None:
        handler()
|
def l2_log_loss(event_times, predicted_event_times, event_observed=None):
    r"""Calculates the l2 log-loss of predicted event times to true event
    times for *non-censored* individuals only.

    .. math:: 1/N \sum_{i} (\log(t_i) - \log(q_i))^2

    Parameters
    ----------
    event_times: a (n,) array of observed survival times.
    predicted_event_times: a (n,) array of predicted survival times.
    event_observed: a (n,) array of censorship flags, 1 if observed, 0 if
        not. Default None assumes all observed.

    Returns
    -------
    l2-log-loss: a scalar
    """
    if event_observed is None:
        observed_mask = np.ones_like(event_times, dtype=bool)
    else:
        observed_mask = event_observed.astype(bool)
    # Mean squared difference of log-times over the observed subset.
    residuals = np.log(event_times[observed_mask]) - np.log(predicted_event_times[observed_mask])
    return (residuals ** 2).mean()
|
def setVisible(self, value):
    """Override Qt method: start or stop the update timer with visibility."""
    timer = self.timer
    if timer is not None:
        # Only poll while the widget is actually shown.
        if value:
            timer.start(self._interval)
        else:
            timer.stop()
    super(BaseTimerStatus, self).setVisible(value)
|
def GetPathFromLink(resource_link, resource_type=''):
    """Gets path from resource link with optional resource type.

    :param str resource_link:
    :param str resource_type:
    :return:
        Path from resource link with resource type appended (if provided).
    :rtype: str
    """
    trimmed = TrimBeginningAndEndingSlashes(resource_link)
    if IsNameBased(trimmed):
        # Percent-encode the path section ('/' is left unescaped since it is
        # the default safe character for path quoting).
        trimmed = urllib_quote(trimmed)
    # Pad leading and trailing slashes for both name based and resource id
    # based links.
    if resource_type:
        return '/{}/{}/'.format(trimmed, resource_type)
    return '/{}/'.format(trimmed)
|
def concat_ast(asts: Sequence[DocumentNode]) -> DocumentNode:
    """Concat ASTs.

    Provided a collection of ASTs, presumably each from different files,
    concatenate the ASTs together into a batched AST, useful for validating
    many GraphQL source files which together represent one conceptual
    application.
    """
    combined_definitions = []
    for document in asts:
        combined_definitions.extend(document.definitions)
    return DocumentNode(definitions=combined_definitions)
|
def instagram_user_recent_media(parser, token):
    """Tag for getting data about recent media of a user.

    :param parser: the template parser (unused)
    :param token: the tag token; must carry exactly one argument, the username
    :return: an InstagramUserRecentMediaNode for the given username
    :raises template.TemplateSyntaxError: if the tag does not have exactly
        one argument
    """
    try:
        tagname, username = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError("%r tag requires a single argument" % token.contents.split()[0])
    # BUGFIX: the node construction used to sit inside the try-block, so a
    # ValueError raised by InstagramUserRecentMediaNode itself was silently
    # reported as a tag-syntax error. Only the unpacking belongs in the try.
    return InstagramUserRecentMediaNode(username)
|
def create_ipython_exports(self):
    """Attach ``CLASS_``, ``METH_`` and ``FIELD_`` attributes for interactive use.

    .. warning:: this feature is experimental and is currently not enabled
        by default! Use with caution!

    Creates attributes for all classes, methods and fields on the Analysis
    object itself, making tab-completion in an iPython shell possible:
    classes via :code:`dx.CLASS_<tab>`, methods via
    :code:`dx.CLASS_Foobar.METH_<tab>` and fields via
    :code:`dx.CLASS_Foobar.FIELD_<tab>`.

    * each ``CLASS_`` item is a :class:`~ClassAnalysis`
    * each ``METH_`` item is a :class:`~MethodClassAnalysis`
    * each ``FIELD_`` item is a :class:`~FieldClassAnalysis`

    As Strings can contain nearly anything, use :meth:`find_strings` instead.
    """
    # TODO: it would be fun to have the classes organized like the packages,
    # i.e. you could do dx.CLASS_xx.yyy.zzz
    for cls in self.get_classes():
        name = "CLASS_" + bytecode.FormatClassToPython(cls.name)
        if hasattr(self, name):
            log.warning("Already existing class {}!".format(name))
        setattr(self, name, cls)

        for meth in cls.get_methods():
            method_name = meth.name
            if method_name in ["<init>", "<clinit>"]:
                # Constructors keep angle brackets; use the class name instead.
                _, method_name = bytecode.get_package_class_name(cls.name)
            # FIXME: this naming schema is not very good... but to describe a
            # method uniquely, we need all of it.
            mname = ("METH_" + method_name + "_"
                     + bytecode.FormatDescriptorToPython(meth.access) + "_"
                     + bytecode.FormatDescriptorToPython(meth.descriptor))
            if hasattr(cls, mname):
                log.warning("already existing method: {} at class {}".format(mname, name))
            setattr(cls, mname, meth)

        # FIXME: synthetic classes produce problems here. If the field name is
        # the same in the parent as in the synthetic one, we can only add one!
        for field in cls.get_fields():
            fname = "FIELD_" + bytecode.FormatNameToPython(field.name)
            if hasattr(cls, fname):
                log.warning("already existing field: {} at class {}".format(fname, name))
            setattr(cls, fname, field)
|
def on_release(self, event):
    """On release, apply the recorded press as a seed and reset press data.

    Uses the (x, y, button) triple stored by the press handler; the raw
    mouse button is mapped through ``self.button_map`` before being passed
    to ``set_seeds``. Does nothing when no press is pending.
    """
    if self.press is None:
        return
    x0, y0, btn = self.press
    # CLEANUP: a local ``color`` was previously assigned for buttons 1/2 but
    # never used; it has been removed along with the surrounding
    # commented-out plotting/debug code.
    self.set_seeds(y0, x0, self.actual_slice, self.button_map[btn])
    self.press = None
    self.update_slice()
|
def get_least_common_subsumer(self, from_tid, to_tid):
    """Returns the deepest common subsumer among two terms.

    @type from_tid: string
    @param from_tid: one term id
    @type to_tid: string
    @param to_tid: another term id
    @rtype: string
    @return: the term identifier of the common subsumer, or None if the
        two paths share no node
    """
    path_from = self.paths_for_terminal[self.terminal_for_term.get(from_tid)][0]
    path_to = self.paths_for_terminal[self.terminal_for_term.get(to_tid)][0]
    shared = set(path_from) & set(path_to)
    if not shared:
        return None
    # The node minimizing the summed positional index in both paths is the
    # deepest common subsumer.
    return min(shared, key=lambda node: path_from.index(node) + path_to.index(node))
|
def evaluations(ty, pv, useScipy=True):
    """evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC)

    Calculate accuracy, mean squared error and squared correlation
    coefficient using the true values (ty) and predicted values (pv).

    :param ty: list, tuple or ndarray of true values
    :param pv: list, tuple or ndarray of predicted values
    :param useScipy: convert ty, pv to ndarray and use scipy functions for
        the evaluation (only when the scipy module is available)
    :return: (ACC, MSE, SCC) as floats; SCC is NaN when undefined
    :raises ValueError: if len(ty) != len(pv)
    """
    if scipy is not None and useScipy:  # idiom fix: was ``scipy != None``
        return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv))
    if len(ty) != len(pv):
        raise ValueError("len(ty) must be equal to len(pv)")
    total_correct = total_error = 0
    sumv = sumy = sumvv = sumyy = sumvy = 0
    for v, y in zip(pv, ty):
        if y == v:
            total_correct += 1
        total_error += (v - y) * (v - y)
        sumv += v
        sumy += y
        sumvv += v * v
        sumyy += y * y
        sumvy += v * y
    n = len(ty)  # renamed from ``l`` (ambiguous single-letter name)
    ACC = 100.0 * total_correct / n
    MSE = total_error / n
    try:
        SCC = ((n * sumvy - sumv * sumy) * (n * sumvy - sumv * sumy)) / \
              ((n * sumvv - sumv * sumv) * (n * sumyy - sumy * sumy))
    except ZeroDivisionError:
        # BUGFIX: narrowed from a bare ``except:``; only an undefined
        # correlation (zero variance in ty or pv) should map to NaN.
        SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))
|
def merge(self, grid=None, merge_points=True, inplace=False, main_has_priority=True):
    """Join one or many other grids to this grid.

    The grid is updated in place by default. Can also be used to merge
    points of adjacent cells when no grids are input.

    Parameters
    ----------
    grid : vtk.UnstructuredGrid or list of vtk.UnstructuredGrid
        Grids to merge into this grid.
    merge_points : bool, optional
        Merge points in exactly the same location between the meshes.
    inplace : bool, optional
        Update this grid in place when True.
    main_has_priority : bool, optional
        When this parameter is True and merge_points is True, the scalar
        arrays of the merging grids will be overwritten by the original
        main mesh.

    Returns
    -------
    merged_grid : vtk.UnstructuredGrid
        Merged grid. Returned only when inplace is False.

    Notes
    -----
    When two or more grids are joined, the type and name of each scalar
    array must match or the arrays will be ignored and not included in the
    final merged mesh.
    """
    appender = vtk.vtkAppendFilter()
    appender.SetMergePoints(merge_points)

    # Input order controls which mesh wins on conflicting scalar arrays:
    # the main mesh is appended last when it has priority, first otherwise.
    if not main_has_priority:
        appender.AddInputData(self)

    if isinstance(grid, vtki.UnstructuredGrid):
        appender.AddInputData(grid)
    elif isinstance(grid, list):
        for other in grid:
            appender.AddInputData(other)

    if main_has_priority:
        appender.AddInputData(self)

    appender.Update()
    merged = _get_output(appender)
    if inplace:
        self.DeepCopy(merged)
    else:
        return merged
|
def serialize(self, data, investigation_time):
    """Serialize a collection of stochastic event sets to XML.

    :param data:
        A dictionary src_group_id -> list of
        :class:`openquake.commonlib.calc.Rupture` objects. Each rupture
        carries ``rupid``, ``events_by_ses``, ``magnitude``, ``strike``,
        ``dip``, ``rake``, ``tectonic_region_type``, the booleans
        ``is_from_fault_source`` and ``is_multi_surface``, plus ``lons``,
        ``lats`` and ``depths``. For fault sources those are 2D mesh
        arrays of uniform shape; for point/area sources the rupture is a
        planar surface additionally exposing ``top_left_corner``,
        ``top_right_corner``, ``bottom_right_corner`` and
        ``bottom_left_corner`` (each a lon/lat/depth triple); for
        multi-surface sources the coordinate arrays have a length that is
        a multiple of 4, one corner quadruple (top left, top right, bottom
        left, bottom right) per planar surface.
    :param investigation_time:
        Investigation time parameter specified in the job.ini
    """
    with open(self.dest, 'wb') as fh:
        root = et.Element('nrml')
        collection = et.SubElement(root, 'ruptureCollection')
        collection.set('investigationTime', str(investigation_time))
        # One <ruptureGroup> per source group, in deterministic id order.
        for grp_id in sorted(data):
            ruptures = data[grp_id]
            group_attrs = dict(id=grp_id, tectonicRegion=ruptures[0].tectonic_region_type)
            group = et.SubElement(collection, 'ruptureGroup', group_attrs)
            for rupture in ruptures:
                rupture_to_element(rupture, group)
        nrml.write(list(root), fh)
|
def _to_inline_css(self, style):
    """Return inline CSS from CSS key/values"""
    declarations = ('{}: {}'.format(convert_style_key(key), value)
                    for key, value in style.items())
    return "; ".join(declarations)
|
def bodvcd(bodyid, item, maxn):
    """Fetch from the kernel pool the double precision values of an item
    associated with a body, where the body is specified by an integer ID
    code.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodvcd_c.html

    :param bodyid: Body ID code.
    :type bodyid: int
    :param item:
        Item for which values are desired,
        ("RADII", "NUT_PREC_ANGLES", etc.)
    :type item: str
    :param maxn: Maximum number of values that may be returned.
    :type maxn: int
    :return: dim, values
    :rtype: tuple
    """
    # Marshal Python arguments into the ctypes representations CSPICE needs.
    bodyid_c = ctypes.c_int(bodyid)
    item_c = stypes.stringToCharP(item)
    dim_c = ctypes.c_int()
    values_c = stypes.emptyDoubleVector(maxn)
    maxn_c = ctypes.c_int(maxn)
    libspice.bodvcd_c(bodyid_c, item_c, maxn_c, ctypes.byref(dim_c), values_c)
    return dim_c.value, stypes.cVectorToPython(values_c)
|
def coefficients(self):
    """All of the coefficient arrays.

    This property is the concatenation of the results from
    :func:`terms.Term.get_real_coefficients` and
    :func:`terms.Term.get_complex_coefficients`, but it will always return
    a tuple of length 6, even if ``alpha_complex_imag`` was omitted from
    ``get_complex_coefficients``.

    Returns:
        (array[j_real], array[j_real], array[j_complex], array[j_complex],
        array[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``,
        ``alpha_complex_real``, ``alpha_complex_imag``,
        ``beta_complex_real``, and ``beta_complex_imag``.

    Raises:
        ValueError: For invalid dimensions for the coefficients.
    """
    vector = self.get_parameter_vector(include_frozen=True)
    blocks = self.get_all_coefficients(vector)
    if len(blocks) != 6:
        raise ValueError("there must be 6 coefficient blocks")
    if any(len(block.shape) != 1 for block in blocks):
        raise ValueError("coefficient blocks must be 1D")
    # The two real blocks must agree in length, as must the four complex ones.
    if len(blocks[0]) != len(blocks[1]):
        raise ValueError("coefficient blocks must have the same shape")
    if any(len(blocks[2]) != len(block) for block in blocks[3:]):
        raise ValueError("coefficient blocks must have the same shape")
    return blocks
|
def _bindDomain(self, domain_name, create=False, block=True):
    """Return the Boto Domain object representing the SDB domain of the given name. If the
    domain does not exist and `create` is True, it will be created.

    :param str domain_name: the name of the domain to bind to
    :param bool create: True if domain should be created if it doesn't exist
    :param bool block: If False, return None if the domain doesn't exist. If True, wait until
           domain appears. This parameter is ignored if create is True.
    :rtype: Domain | None
    :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the
            retry timeout expires.
    """
    log.debug("Binding to job store domain '%s'.", domain_name)
    # Retry while the domain is reported missing (it may still be
    # propagating) or while SDB is temporarily unavailable.
    retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e))
    if not block:
        # Non-blocking mode: cap the retry window instead of waiting for the
        # default (longer) timeout.
        retryargs['timeout'] = 15
    for attempt in retry_sdb(**retryargs):
        with attempt:
            try:
                return self.db.get_domain(domain_name)
            except SDBResponseError as e:
                if no_such_sdb_domain(e):
                    if create:
                        return self.db.create_domain(domain_name)
                    elif block:
                        # Re-raise inside the retry context so retry_sdb
                        # keeps polling until the domain appears or the
                        # retry timeout expires.
                        raise
                    else:
                        # Non-blocking and not creating: absence is a result.
                        return None
                else:
                    # Any other SDB error is not retried here; propagate.
                    raise
|
def insert(self, **kwargs):
    """Insert commands at the beginning of the sequence.

    This is provided because certain commands have to come first (such as
    user creation), but may need to be added after other commands have
    already been specified. Later calls to insert put their commands before
    those in the earlier calls.

    Also, since the order of iterated kwargs is not guaranteed (in
    Python 2.x), you should really only call insert with one keyword at a
    time. See the doc of append for more details.

    :param kwargs: the key/value pair to append first
    :return: the action, so you can append Action(...).insert(...).append(...)
    """
    for key in kwargs:
        self.commands.insert(0, {key: kwargs[key]})
    return self
|
def parse_type(parser):
    # type: (Parser) -> Union[NamedType, NonNullType, ListType]
    """Handles the 'Type': TypeName, ListType, and NonNullType parsing
    rules."""
    start = parser.token.start
    if skip(parser, TokenKind.BRACKET_L):
        # List type: recurse for the element type, then require ']'.
        inner = parse_type(parser)
        expect(parser, TokenKind.BRACKET_R)
        ast_type = ast.ListType(type=inner, loc=loc(parser, start))  # type: ignore
    else:
        ast_type = parse_named_type(parser)
    # A trailing '!' wraps whatever was parsed as non-null.
    if skip(parser, TokenKind.BANG):
        ast_type = ast.NonNullType(type=ast_type, loc=loc(parser, start))
    return ast_type
|
def parse_findPeaks(self, f):
    """Parse HOMER findPeaks file headers."""
    parsed = dict()
    s_name = f['s_name']
    for line in f['f']:
        stripped = line.strip()
        # The first non-blank, non-comment line marks the start of the data.
        if stripped and not stripped.startswith('#'):
            break
        # Header lines look like '# key = value'.
        fields = line[2:].split('=')
        if len(fields) > 1:
            key = fields[0].strip().replace(' ', '_').lower()
            value = fields[1].strip().replace('%', '')
            try:
                parsed[key] = float(value)
            except ValueError:
                parsed[key] = value
            if key == 'tag_directory':
                s_name = self.clean_s_name(os.path.basename(value), os.path.dirname(value))
    if len(parsed) > 0:
        if s_name in self.homer_findpeaks:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name, section='findPeaks')
        self.homer_findpeaks[s_name] = parsed
|
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):
    """Component-wise operation with no broadcasting.

    Args:
        tf_fn: a component-wise function taking n tf.Tensor inputs and
            producing a tf.Tensor output
        xs: n Tensors
        output_dtype: an optional dtype
        grad_function: an optional python function
        name: an optional string

    Returns:
        a Tensor
    """
    op_name = name or "cwise"
    # A component-wise op can be split along every dimension of its first
    # input, so all of those dims are declared splittable.
    return slicewise(tf_fn, xs,
                     output_dtype=output_dtype,
                     splittable_dims=xs[0].shape.dims,
                     grad_function=grad_function,
                     name=op_name)
|
def get_limits(self):
    """Return all known limits for this service, as a dict of their names
    to :py:class:`~.AwsLimit` objects.

    :returns: dict of limit names to :py:class:`~.AwsLimit` objects
    :rtype: dict
    """
    logger.debug("Gathering %s's limits from AWS", self.service_name)
    # These limits are static; reuse the cached dict after the first call.
    if self.limits:
        return self.limits
    self.limits = {
        'Trails Per Region': AwsLimit(
            'Trails Per Region', self, 5,
            self.warning_threshold, self.critical_threshold,
            limit_type=self.aws_type),
        'Event Selectors Per Trail': AwsLimit(
            'Event Selectors Per Trail', self, 5,
            self.warning_threshold, self.critical_threshold,
            limit_type=self.aws_type,
            limit_subtype='AWS::CloudTrail::EventSelector'),
        'Data Resources Per Trail': AwsLimit(
            'Data Resources Per Trail', self, 250,
            self.warning_threshold, self.critical_threshold,
            limit_type=self.aws_type,
            limit_subtype='AWS::CloudTrail::DataResource'),
    }
    return self.limits
|
def get_bank_hierarchy_design_session(self, proxy):
    """Gets the session designing bank hierarchies.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.BankHierarchyDesignSession) - a
            ``BankHierarchySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_bank_hierarchy_design()`` is false
    *compliance: optional -- This method must be implemented if
    ``supports_bank_hierarchy_design()`` is true.*
    """
    if self.supports_bank_hierarchy_design():
        # pylint: disable=no-member
        return sessions.BankHierarchyDesignSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
|
def save_hdf(self, filename, path='', overwrite=False, append=False):
    """Writes all info necessary to recreate object to HDF file.

    Saves the table of photometry as a DataFrame under ``<path>/df`` and
    the model specification (spectroscopy, parallax, N, index) in that
    node's attrs.

    :param filename: HDF5 file to write to.
    :param path: group path within the file.
    :param overwrite: if ``path`` already exists, delete the whole file
        first (note: this removes every path in the file).
    :param append: if ``path`` already exists, write alongside it.
    :raises IOError: if ``path`` exists and neither overwrite nor append
        is set.
    """
    if os.path.exists(filename):
        # BUGFIX: the store was previously opened/closed by hand on several
        # branches; an exception (e.g. from os.remove) could leak the open
        # handle. The context manager guarantees the store is closed.
        with pd.HDFStore(filename) as store:
            path_exists = path in store
        if path_exists:
            if overwrite:
                os.remove(filename)
            elif not append:
                raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path, filename))
    df = self.to_df()
    df.to_hdf(filename, path + '/df')
    with pd.HDFStore(filename) as store:
        attrs = store.get_storer(path + '/df').attrs
        attrs.spectroscopy = self.spectroscopy
        attrs.parallax = self.parallax
        attrs.N = self._N
        attrs.index = self._index
        # BUGFIX: a redundant store.close() inside this with-block has been
        # removed; the context manager already closes the store.
|
def fancy_handler(signum, frame, spinner):
    """Signal handler that gracefully shuts down the ``spinner`` instance
    when the registered signal is delivered to the process running it.

    ``signum`` and ``frame`` are the mandatory signal-handler arguments;
    check the ``signal.signal`` function for more details.
    """
    # Mark the spinner as failed, stop its animation, then exit cleanly.
    spinner.red.fail("✘")
    spinner.stop()
    sys.exit(0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.