def anglesep_meeus(lon0: float, lat0: float, lon1: float, lat1: float, deg: bool = True) -> float:
    """
    Parameters
    ----------
    lon0 : float or numpy.ndarray of float
        longitude of first point
    lat0 : float or numpy.ndarray of float
        latitude of first point
    lon1 : float or numpy.ndarray of float
        longitude of second point
    lat1 : float or numpy.ndarray of float
        latitude of second point
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Returns
    -------
    sep_rad : float or numpy.ndarray of float
        angular separation

    Meeus p. 109, from "Astronomical Algorithms" by Jean Meeus, Ch. 16 p. 111 (16.5):
    gives the angular distance in degrees between two right ascension / declination
    points in the sky, neglecting atmospheric effects, of course.
    The Meeus haversine method is stable all the way to exactly 0 deg.

    Either the arrays must be the same size, or one of them must be a scalar.
    """
    if deg:
        lon0 = radians(lon0)
        lat0 = radians(lat0)
        lon1 = radians(lon1)
        lat1 = radians(lat1)

    sep_rad = 2 * arcsin(sqrt(haversine(lat0 - lat1)
                              + cos(lat0) * cos(lat1) * haversine(lon0 - lon1)))

    if deg:
        return degrees(sep_rad)
    else:
        return sep_rad
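A quick usage sketch of the function above. The helper names here are assumptions: the body relies on a module-level haversine(theta) (the half versed sine, sin(theta/2)**2) and on numpy's radians, degrees, arcsin, sqrt and cos being imported at module level, so a standalone run needs something like:

from numpy import radians, degrees, arcsin, sqrt, cos, sin

def haversine(theta):
    # assumed helper: half versed sine used by the Meeus formula
    return sin(theta / 2) ** 2

# angular separation between Sirius and Procyon (RA/Dec in degrees)
sep = anglesep_meeus(101.2872, -16.7161, 114.8255, 5.2250)
print(round(sep, 1))  # about 25.7 degrees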
def create(self, name, **params):
    """Creates a new entity in this collection.

    This function makes either one or two roundtrips to the server,
    depending on the type of entities in this collection, plus at most two
    more if the ``autologin`` field of :func:`connect` is set to ``True``.

    :param name: The name of the entity to create.
    :type name: ``string``
    :param namespace: A namespace, as created by the
        :func:`splunklib.binding.namespace` function (optional). You can
        also set ``owner``, ``app``, and ``sharing`` in ``params``.
    :type namespace: A :class:`splunklib.data.Record` object with keys
        ``owner``, ``app``, and ``sharing``.
    :param params: Additional entity-specific arguments (optional).
    :type params: ``dict``
    :return: The new entity.
    :rtype: A subclass of :class:`Entity`, chosen by
        :meth:`Collection.self.item`.

    **Example**::

        import splunklib.client as client
        s = client.connect(...)
        applications = s.apps
        new_app = applications.create("my_fake_app")
    """
    if not isinstance(name, basestring):
        raise InvalidNameException("%s is not a valid name for an entity." % name)
    if 'namespace' in params:
        namespace = params.pop('namespace')
        params['owner'] = namespace.owner
        params['app'] = namespace.app
        params['sharing'] = namespace.sharing
    response = self.post(name=name, **params)
    atom = _load_atom(response, XNAME_ENTRY)
    if atom is None:
        # This endpoint doesn't return the content of the new item.
        # We have to go fetch it ourselves.
        return self[name]
    else:
        entry = atom.entry
        state = _parse_atom_entry(entry)
        entity = self.item(self.service, self._entity_path(state), state=state)
        return entity
def versatile_options():
    """Return a list of options that can be changed at any time (not only at
    initialization); however, the list might not be entirely up to date.

    The string '#v' in the default value indicates a 'versatile' option
    that can be changed any time.
    """
    return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items())
                        if i[1].find(' #v ') > 0))
def get_binding(self, schema, data):
    """For a given schema, get a binding mediator providing links to the RDF
    terms matching that schema."""
    schema = self.parent.get_schema(schema)
    return Binding(schema, self.parent.resolver, data=data)
def getValue(self):
    """Returns the PromisedRequirement value."""
    func = dill.loads(self._func)
    return func(*self._args)
def random_line_data(chars_per_line=80):
    """Create a line of random characters.

    Args:
        chars_per_line: An integer that says how many characters to return

    Returns:
        A string
    """
    return ''.join(__random.choice(__string.ascii_letters) for x in range(chars_per_line))
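A quick self-check of the helper above. The double-underscore names are how the surrounding module aliases its imports, so a standalone run needs something like this (hypothetical setup, not the module's own code):

import random as __random
import string as __string

line = random_line_data(10)
assert len(line) == 10 and all(c in __string.ascii_letters for c in line)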
def remove_all(self, locator):
    """Removes all component references that match the specified locator.

    :param locator: a locator to remove references by.

    :return: a list containing all removed references.
    """
    components = []
    if locator is None:
        return components

    self._lock.acquire()
    try:
        for reference in reversed(self._references):
            if reference.match(locator):
                self._references.remove(reference)
                components.append(reference.get_component())
    finally:
        self._lock.release()

    return components
def getMonitorByName(self, monitorFriendlyName):
    """Returns monitor status and alltimeuptimeratio for a MonitorFriendlyName."""
    url = self.baseUrl
    url += "getMonitors?apiKey=%s" % self.apiKey
    url += "&noJsonCallback=1&format=json"
    success, response = self.requestApi(url)
    if success:
        monitors = response.get('monitors').get('monitor')
        for i in range(len(monitors)):
            monitor = monitors[i]
            if monitor.get('friendlyname') == monitorFriendlyName:
                status = monitor.get('status')
                alltimeuptimeratio = monitor.get('alltimeuptimeratio')
                return status, alltimeuptimeratio
    return None, None
def run_subprocess(command, return_code=False, **kwargs):
    """Run a command using subprocess.Popen.

    Run the command and wait for it to complete. If the return code is zero,
    return; otherwise raise CalledProcessError. By default, this also adds
    stdout=subprocess.PIPE and stderr=subprocess.PIPE to the call to Popen to
    suppress printing to the terminal.

    Parameters
    ----------
    command : list of str
        Command to run as subprocess (see subprocess.Popen documentation).
    return_code : bool
        If True, the returncode will be returned, and no error checking will
        be performed (so this function should always return without error).
    **kwargs : dict
        Additional kwargs to pass to ``subprocess.Popen``.

    Returns
    -------
    stdout : str
        Stdout returned by the process.
    stderr : str
        Stderr returned by the process.
    code : int
        The command exit code. Only returned if ``return_code`` is True.
    """
    # code adapted with permission from mne-python
    use_kwargs = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    use_kwargs.update(kwargs)

    p = subprocess.Popen(command, **use_kwargs)
    output = p.communicate()

    # communicate() may return bytes, str, or None depending on the kwargs
    # passed to Popen(). Convert all to unicode str:
    output = ['' if s is None else s for s in output]
    output = [s.decode('utf-8') if isinstance(s, bytes) else s for s in output]
    output = tuple(output)

    if not return_code and p.returncode:
        print(output[0])
        print(output[1])
        err_fun = subprocess.CalledProcessError.__init__
        if 'output' in inspect.getargspec(err_fun).args:
            raise subprocess.CalledProcessError(p.returncode, command, output)
        else:
            raise subprocess.CalledProcessError(p.returncode, command)

    if return_code:
        output = output + (p.returncode,)

    return output
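A minimal usage sketch (assumes subprocess and inspect are imported at module level, as the body above implies; the echo/false commands assume a POSIX system):

out, err = run_subprocess(['echo', 'hello'])
print(out.strip())   # hello

# with return_code=True no error checking is performed
out, err, code = run_subprocess(['false'], return_code=True)
print(code)          # 1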
def script(container, script_path, fail_nonzero=False, upload_dir=False, **kwargs):
    """Runs a script inside a container, which is created with all its
    dependencies. The container is removed after it has been run, whereas the
    dependencies are not destroyed. The output is printed to the console.

    :param container: Container configuration name.
    :param script_path: Local path to the script file.
    :param fail_nonzero: Fail if the script returns with a nonzero exit code.
    :param upload_dir: Upload the entire parent directory of the script file
        to the remote.
    :param kwargs: Additional keyword arguments to the run_script action.
    """
    full_script_path = os.path.abspath(script_path)
    prefix, name = os.path.split(full_script_path)
    with temp_dir() as remote_tmp:
        if upload_dir:
            prefix_path, prefix_name = os.path.split(prefix)
            remote_script = posixpath.join(remote_tmp, prefix_name, name)
            put(prefix, remote_tmp, mirror_local_mode=True)
        else:
            remote_script = posixpath.join(remote_tmp, name)
            put(script_path, remote_script, mirror_local_mode=True)
        results = [output.result
                   for output in container_fabric().run_script(container, script_path=remote_script, **kwargs)
                   if output.action_type == ContainerUtilAction.SCRIPT]
    for res in results:
        puts("Exit code: {0}".format(res['exit_code']))
        if res['exit_code'] == 0 or not fail_nonzero:
            puts(res['log'])
        else:
            error(res['log'])
def match_replace_binary(cls, ops, kwargs):
    """Similar to :func:`match_replace`, but for arbitrary length operations,
    such that each pair of subsequent operands is matched pairwise.

        >>> A = wc("A")
        >>> class FilterDupes(Operation):
        ...     _binary_rules = {
        ...         'filter_dupes': (pattern_head(A, A), lambda A: A)}
        ...     simplifications = [match_replace_binary, assoc]
        ...     _neutral_element = 0
        >>> FilterDupes.create(1, 2, 3, 4)  # No duplicates
        FilterDupes(1, 2, 3, 4)
        >>> FilterDupes.create(1, 2, 2, 3, 4)  # Some duplicates
        FilterDupes(1, 2, 3, 4)

    Note that this only works for *subsequent* duplicate entries:

        >>> FilterDupes.create(1, 2, 3, 2, 4)  # No *subsequent* duplicates
        FilterDupes(1, 2, 3, 2, 4)

    Any operation that uses binary reduction must be associative and define a
    neutral element. The binary rules must be compatible with associativity,
    i.e. there is no specific order in which the rules are applied to pairs
    of operands.
    """
    assert assoc in cls.simplifications, (
        cls.__name__ + " must be associative to use match_replace_binary")
    assert hasattr(cls, '_neutral_element'), (
        cls.__name__ + " must define a neutral element to use "
                       "match_replace_binary")
    fops = _match_replace_binary(cls, list(ops))
    if len(fops) == 1:
        return fops[0]
    elif len(fops) == 0:
        return cls._neutral_element
    else:
        return fops, kwargs
def snap(self, instruction):
    """Returns a new MayaDT object modified by the given instruction.

    Powered by snaptime. See https://github.com/zartstrom/snaptime
    for complete documentation of the snaptime instructions.
    """
    return self.from_datetime(snaptime.snap(self.datetime(), instruction))
def read(self, size=None):
    """Return the next size number of bytes from the stream.

    If size is not defined, return all bytes of the stream up to EOF.
    """
    if size is None:
        t = []
        while True:
            buf = self._read(self.bufsize)
            if not buf:
                break
            t.append(buf)
        buf = "".join(t)
    else:
        buf = self._read(size)
    self.pos += len(buf)
    return buf
def get_slo_url(self):
    """Gets the SLO URL.

    :returns: An URL, the SLO endpoint of the IdP
    :rtype: string
    """
    url = None
    idp_data = self.__settings.get_idp_data()
    if 'singleLogoutService' in idp_data.keys() and 'url' in idp_data['singleLogoutService']:
        url = idp_data['singleLogoutService']['url']
    return url
def SetValue(self, Channel, Parameter, Buffer):
    """Configures or sets a PCAN Channel value.

    Remarks:
        Parameters can be present or not according to the kind of Hardware
        (PCAN Channel) being used. If a parameter is not available, a
        PCAN_ERROR_ILLPARAMTYPE error will be returned.

    Parameters:
        Channel      : A TPCANHandle representing a PCAN Channel
        Parameter    : The TPCANParameter parameter to set
        Buffer       : Buffer with the value to be set
        BufferLength : Size in bytes of the buffer

    Returns:
        A TPCANStatus error code
    """
    try:
        if Parameter == PCAN_LOG_LOCATION or Parameter == PCAN_LOG_TEXT or Parameter == PCAN_TRACE_LOCATION:
            mybuffer = create_string_buffer(256)
        else:
            mybuffer = c_int(0)

        mybuffer.value = Buffer
        res = self.__m_dllBasic.CAN_SetValue(Channel, Parameter, byref(mybuffer), sizeof(mybuffer))
        return TPCANStatus(res)
    except:
        logger.error("Exception on PCANBasic.SetValue")
        raise
def _is_visible(cls, property_name):
    """Private method that checks whether an object property should be visible."""
    if isinstance(property_name, list):
        return [cls._is_visible(p) for p in property_name]
    if property_name.startswith('__') and property_name.endswith('__'):
        return False
    return property_name.startswith(cls.STARTS_WITH) and property_name.endswith(cls.ENDS_WITH)
def status(self, migration_rqst_id="", block_name="", dataset="", user=""):
    """Interface to query the status of a migration request.

    Input parameters are considered in this order of preference:
    migration_rqst_id, block, dataset, user (if multiple parameters are
    provided, only the precedence order is followed).
    """
    try:
        return self.dbsMigrate.listMigrationRequests(migration_rqst_id, block_name, dataset, user)
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
    except Exception as ex:
        sError = "DBSMigrateModel/status. %s\n Exception trace: \n %s." % (ex, traceback.format_exc())
        if hasattr(ex, 'status') and ex.status == 400:
            dbsExceptionHandler('dbsException-invalid-input2', str(ex), self.logger.exception, sError)
        else:
            dbsExceptionHandler('dbsException-server-error', str(ex), self.logger.exception, sError)
def node_label_absent(name, node, **kwargs):
    '''
    Ensures that the named label is absent from the node.

    name
        The name of the label

    node
        The name of the node
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    labels = __salt__['kubernetes.node_labels'](node, **kwargs)

    if name not in labels:
        ret['result'] = True if not __opts__['test'] else None
        ret['comment'] = 'The label does not exist'
        return ret

    if __opts__['test']:
        ret['comment'] = 'The label is going to be deleted'
        ret['result'] = None
        return ret

    __salt__['kubernetes.node_remove_label'](node_name=node, label_name=name, **kwargs)

    ret['result'] = True
    ret['changes'] = {'kubernetes.node_label': {'new': 'absent', 'old': 'present'}}
    ret['comment'] = 'Label removed from node'
    return ret
def windspeed(self, t):
    """Return the wind speed list at time `t`."""
    ws = [0] * self.n
    for i in range(self.n):
        q = ceil(t / self.dt[i])
        q_prev = 0 if q == 0 else q - 1
        r = t % self.dt[i]
        r = 0 if abs(r) < 1e-6 else r
        if r == 0:
            ws[i] = self.speed[i][q]
        else:
            t1 = self.time[i][q_prev]
            s1 = self.speed[i][q_prev]
            s2 = self.speed[i][q]
            ws[i] = s1 + (t - t1) * (s2 - s1) / self.dt[i]
    return matrix(ws)
def index(request, template_name="tagging_ext/index.html", min_size=0, limit=10):
    """
    min_size: smallest count accepted for a tag
    order_by: asc or desc by count
    limit: maximum number of tags to display

    TODO: convert the hand-written query to an ORM call. Right now I know
    this works with Sqlite3 and PostgreSQL.
    """
query = """ SELECT tag_item.tag_id as tag_id, COUNT(tag_item.tag_id) as counter FROM tagging_taggeditem as tag_item GROUP BY tag_id HAVING COUNT(tag_item.tag_id) > %s ORDER BY counter desc LIMIT %s """ cursor = connection . cursor ( ) cursor . execute ( query , [ min_size , limit ] ) results = [ ] for row in cursor . fetchall ( ) : try : tag = Tag . objects . get ( id = row [ 0 ] ) except ObjectDoesNotExist : continue if ' ' in tag . name : continue record = dict ( tag = tag , count = row [ 1 ] ) results . append ( record ) dictionary = { 'tags' : results } return render_to_response ( template_name , dictionary , context_instance = RequestContext ( request ) )
def make_list(args):
    """Generates a .lst file.

    Parameters
    ----------
    args: object that contains all the arguments
    """
    image_list = list_image(args.root, args.recursive, args.exts)
    image_list = list(image_list)
    if args.shuffle is True:
        random.seed(100)
        random.shuffle(image_list)
    N = len(image_list)
    chunk_size = (N + args.chunks - 1) // args.chunks
    for i in range(args.chunks):
        chunk = image_list[i * chunk_size:(i + 1) * chunk_size]
        if args.chunks > 1:
            str_chunk = '_%d' % i
        else:
            str_chunk = ''
        sep = int(chunk_size * args.train_ratio)
        sep_test = int(chunk_size * args.test_ratio)
        if args.train_ratio == 1.0:
            write_list(args.prefix + str_chunk + '.lst', chunk)
        else:
            if args.test_ratio:
                write_list(args.prefix + str_chunk + '_test.lst', chunk[:sep_test])
            if args.train_ratio + args.test_ratio < 1.0:
                write_list(args.prefix + str_chunk + '_val.lst', chunk[sep_test + sep:])
            write_list(args.prefix + str_chunk + '_train.lst', chunk[sep_test:sep_test + sep])
def reshape_line_plot(df, x, y):
    """Reshape data from long form to "line plot form".

    Line plot form has x value as the index with one column for each line.
    Each column has data points as values and all metadata as column headers.
    """
    idx = list(df.columns.drop(y))
    if df.duplicated(idx).any():
        warnings.warn('Duplicated index found.')
        df = df.drop_duplicates(idx, keep='last')
    df = df.set_index(idx)[y].unstack(x).T
    return df
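A small illustrative call with made-up data (assumes pandas is imported as pd; 'year' plays the role of x and 'value' of y):

import pandas as pd

df = pd.DataFrame({
    'scenario': ['a', 'a', 'b', 'b'],
    'year':     [2020, 2030, 2020, 2030],
    'value':    [1.0, 2.0, 1.5, 2.5],
})

wide = reshape_line_plot(df, x='year', y='value')
# 'year' becomes the index and each remaining metadata combination ('a', 'b')
# becomes one column, i.e. one line to plot
print(wide)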
def computePWCorrelations(spikeTrains, removeAutoCorr):
    """Computes pairwise correlations from spikeTrains.

    @param spikeTrains (array) spike trains obtained from the activation of
           cells in the TM; the array dimensions are numCells x timeSteps
    @param removeAutoCorr (boolean) if true, auto-correlations are removed by
           subtracting the diagonal of the correlation matrix
    @return corrMatrix (array) numCells x numCells matrix containing the
            Pearson correlation coefficient of spike trains of cell i and cell j
    @return numNegPCC (int) number of negative pairwise correlations (PCC(i,j) < 0)
    """
    numCells = np.shape(spikeTrains)[0]
    corrMatrix = np.zeros((numCells, numCells))
    numNegPCC = 0
    for i in range(numCells):
        for j in range(numCells):
            if i == j and removeAutoCorr == True:
                continue
            if not all(spikeTrains[i, :] == 0) and not all(spikeTrains[j, :] == 0):
                corrMatrix[i, j] = np.corrcoef(spikeTrains[i, :], spikeTrains[j, :])[0, 1]
                if corrMatrix[i, j] < 0:
                    numNegPCC += 1
    return (corrMatrix, numNegPCC)
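A tiny sanity check with made-up spike trains (assumes numpy is imported as np at module level, as the body implies):

import numpy as np

# three cells, five time steps; cells 0 and 1 fire identically, cell 2 is anti-phase
spikes = np.array([[1, 0, 1, 0, 1],
                   [1, 0, 1, 0, 1],
                   [0, 1, 0, 1, 0]], dtype=float)

corr, n_neg = computePWCorrelations(spikes, removeAutoCorr=True)
print(corr[0, 1])  # 1.0, identical trains
print(n_neg)       # 4: each anti-phase pair counted in both (i, j) orders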
def cli(env, identifier):
    """Cancel a dedicated host server immediately."""
    mgr = SoftLayer.DedicatedHostManager(env.client)
    host_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'dedicated host')

    if not (env.skip_confirmations or formatting.no_going_back(host_id)):
        raise exceptions.CLIAbort('Aborted')

    mgr.cancel_host(host_id)

    click.secho('Dedicated Host %s was cancelled' % host_id, fg='green')
def ggplot2_style(ax):
    """Styles an axes to appear like ggplot2.

    Must be called after all plot and axis manipulation operations have been
    carried out (needs to know the final tick spacing).
    """
    # set the style of the major and minor grid lines, filled blocks
    ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
    ax.grid(True, 'minor', color='0.92', linestyle='-', linewidth=0.7)
    ax.patch.set_facecolor('0.85')
    ax.set_axisbelow(True)

    # set minor tick spacing to 1/2 of the major ticks
    ax.xaxis.set_minor_locator(MultipleLocator((plt.xticks()[0][1] - plt.xticks()[0][0]) / 2.0))
    ax.yaxis.set_minor_locator(MultipleLocator((plt.yticks()[0][1] - plt.yticks()[0][0]) / 2.0))

    # remove axis border
    for child in ax.get_children():
        if isinstance(child, mpl.spines.Spine):
            child.set_alpha(0)

    # restyle the tick lines
    for line in ax.get_xticklines() + ax.get_yticklines():
        line.set_markersize(5)
        line.set_color("gray")
        line.set_markeredgewidth(1.4)

    # remove the minor tick lines
    for line in ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True):
        line.set_markersize(0)

    # only show bottom left ticks, pointing out of axis
    mpl.rcParams['xtick.direction'] = 'out'
    mpl.rcParams['ytick.direction'] = 'out'
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    if ax.legend_ is not None:
        lg = ax.legend_
        lg.get_frame().set_linewidth(0)
        lg.get_frame().set_alpha(0.5)
def accelerated_dtw(x, y, dist, warp=1):
    """Computes Dynamic Time Warping (DTW) of two sequences in a faster way.

    Instead of iterating through each element and calculating each distance,
    this uses the cdist function from scipy
    (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html).

    :param array x: N1*M array
    :param array y: N2*M array
    :param string or func dist: distance parameter for cdist. When a string
        is given, cdist uses optimized functions for the distance metrics.
        If a string is passed, the distance function can be 'braycurtis',
        'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice',
        'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
        'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
        'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
    :param int warp: how many shifts are computed.

    Returns the minimum distance, the cost matrix, the accumulated cost
    matrix, and the warp path.
    """
    assert len(x)
    assert len(y)
    if ndim(x) == 1:
        x = x.reshape(-1, 1)
    if ndim(y) == 1:
        y = y.reshape(-1, 1)
    r, c = len(x), len(y)
    D0 = zeros((r + 1, c + 1))
    D0[0, 1:] = inf
    D0[1:, 0] = inf
    D1 = D0[1:, 1:]
    D0[1:, 1:] = cdist(x, y, dist)
    C = D1.copy()
    for i in range(r):
        for j in range(c):
            min_list = [D0[i, j]]
            for k in range(1, warp + 1):
                min_list += [D0[min(i + k, r), j],
                             D0[i, min(j + k, c)]]
            D1[i, j] += min(min_list)
    if len(x) == 1:
        path = zeros(len(y)), range(len(y))
    elif len(y) == 1:
        path = range(len(x)), zeros(len(x))
    else:
        path = _traceback(D0)
    return D1[-1, -1] / sum(D1.shape), C, D1, path
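A minimal usage sketch. The body above relies on module-level names (ndim, zeros, inf from numpy, cdist from scipy.spatial.distance, and a _traceback helper defined elsewhere in the module), so this only illustrates the call:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([1.0, 1.0, 2.0, 3.0, 4.0])

d, cost, acc_cost, path = accelerated_dtw(x, y, dist='euclidean')
print(d)     # normalized DTW distance; small for similar sequences
print(path)  # pair of index arrays describing the alignment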
def annotate_bed ( self , bt , name , col_name , complete = None , df_col = None , ) : """Annotate the input bed file using one of the annotation beds . Parameters bt : pybedtools . BedTool BedTool for either one of the anchors , the loops , or the loop inners . name : str The key for the annoation bed file in annot _ beds . col _ name : str Used to name the columns that will be made . complete : bool If True , this method will check whether the features in the annotation bed are completely contained by the features in the input bed . df _ col : str If the name for bt isn ' t the index of self . df , this specifies which column of self . df contains the names for bt . For instance , if bt is the anchor1 BedTool , the df _ col = ' anchor11 ' ."""
import numpy as np import pandas as pd has_name_col = len ( self . annot_beds [ name ] [ 0 ] . fields ) > 3 print ( 'one' ) if complete : res = bt . intersect ( self . annot_beds [ name ] , sorted = True , wo = True , F = 1 ) else : res = bt . intersect ( self . annot_beds [ name ] , sorted = True , wo = True ) print ( 'two' ) try : df = res . to_dataframe ( names = range ( len ( res [ 0 ] . fields ) ) ) ind = df [ 3 ] . values if df_col is None : self . df [ col_name ] = False self . df . ix [ set ( ind ) , col_name ] = True else : tdf = pd . DataFrame ( True , index = ind , columns = [ col_name ] ) self . df = self . df . merge ( tdf , left_on = df_col , right_index = True , how = 'outer' ) self . df [ col_name ] = self . df [ col_name ] . fillna ( False ) # self . df . ix [ self . df [ col _ name ] . isnull ( ) , col _ name ] = False print ( 'a' ) if has_name_col : vals = df [ 7 ] . values else : vals = list ( df [ 4 ] . astype ( str ) + ':' + df [ 5 ] . astype ( str ) + '-' + df [ 6 ] . astype ( str ) ) print ( 'b' ) df . index = vals gb = df . groupby ( 3 ) t = pd . Series ( gb . groups ) print ( 'c' ) t = pd . DataFrame ( t . apply ( lambda x : set ( x ) ) ) print ( 'd' ) t . columns = [ '{}_features' . format ( col_name ) ] self . df = self . df . merge ( t , left_on = df_col , right_index = True , how = 'outer' ) print ( 'e' ) except IndexError : pass
def new_feed(self, name: str, layer_shape: tuple):
    """Creates a feed layer. This is usually the first layer in the network.

    :param name: name of the layer
    :return:
    """
    feed_data = tf.placeholder(tf.float32, layer_shape, 'input')
    self.__network.add_layer(name, layer_output=feed_data)
def table(self, datatype=None):
    "Deprecated method to convert any Element to a Table."
    if config.future_deprecations:
        self.param.warning("The table method is deprecated and should no "
                           "longer be used. Instead cast the %s to a "
                           "Table directly." % type(self).__name__)
    if datatype and not isinstance(datatype, list):
        datatype = [datatype]
    from ..element import Table
    return Table(self, **(dict(datatype=datatype) if datatype else {}))
def hasColumn(self, column, recurse=True, flags=0):
    """Returns whether or not this column exists within the list of columns
    for this schema.

    :return     <bool>
    """
    return column in self.columns(recurse=recurse, flags=flags)
def transform_velocity_array(array, pos_array, vel, euler, rotation_vel=(0, 0, 0)):
    """Transform any Nx3 velocity vector array by adding the center-of-mass
    'vel', accounting for solid-body rotation, and applying an euler
    transformation.

    :parameter array array: numpy array of Nx3 velocity vectors in the
        original (star) coordinate frame
    :parameter array pos_array: positions of the elements with respect to the
        original (star) coordinate frame. Must be the same shape as 'array'.
    :parameter array vel: numpy array with length 3 giving cartesian velocity
        offsets in the new (system) coordinate frame
    :parameter array euler: euler angles (etheta, elongan, eincl) in radians
    :parameter array rotation_vel: vector of the rotation velocity of the star
        in the original (star) coordinate frame
    :return: new velocity array with same shape as 'array'
    """
    trans_matrix = euler_trans_matrix(*euler)
    # v_{rot,i} = omega x r_i with omega = rotation_vel
    rotation_component = np.cross(rotation_vel, pos_array, axisb=1)
    orbital_component = np.asarray(vel)
    if isinstance(array, ComputedColumn):
        array = array.for_computations
    new_vel = np.dot(np.asarray(array) + rotation_component, trans_matrix.T) + orbital_component
    return new_vel
def list_presubscriptions(self, **kwargs):
    """Get a list of pre-subscription data.

    :returns: a list of `Presubscription` objects
    :rtype: list of mbed_cloud.presubscription.Presubscription
    """
    api = self._get_api(mds.SubscriptionsApi)
    resp = api.get_pre_subscriptions(**kwargs)
    return [Presubscription(p) for p in resp]
def update(self, path, node):
    '''Update the dict with a new color using a 'path' through the dict.

    You can either pass an existing path e.g. 'Scaffold.mutations' to
    override a color or part of the hierarchy, or you can add a new leaf
    node or dict.
    '''
    assert (type(path) == type(self.name))
    assert (type(node) == type(self.name) or type(node) == type(predefined))
    d = self.color_scheme
    tokens = path.split('.')
    for t in tokens[:-1]:
        d = d.get(t)
        if d is None:
            raise Exception("Path '%s' not found." % path)
    d[tokens[-1]] = node
def from_dict(data, ctx):
    """Instantiate a new UnitsAvailable from a dict (generally from loading a
    JSON response). The data used to instantiate the UnitsAvailable is a
    shallow copy of the dict passed in, with any complex child types
    instantiated appropriately.
    """
    data = data.copy()

    if data.get('default') is not None:
        data['default'] = ctx.order.UnitsAvailableDetails.from_dict(data['default'], ctx)

    if data.get('reduceFirst') is not None:
        data['reduceFirst'] = ctx.order.UnitsAvailableDetails.from_dict(data['reduceFirst'], ctx)

    if data.get('reduceOnly') is not None:
        data['reduceOnly'] = ctx.order.UnitsAvailableDetails.from_dict(data['reduceOnly'], ctx)

    if data.get('openOnly') is not None:
        data['openOnly'] = ctx.order.UnitsAvailableDetails.from_dict(data['openOnly'], ctx)

    return UnitsAvailable(**data)
def _updateConstructorAndMembers ( self ) : """We overwrite constructor and accessors every time because the constructor might have to consume all members even if their decorator is below the " synthesizeConstructor " decorator and it also might need to update the getters and setters because the naming convention has changed ."""
syntheticMetaData = self . _syntheticMetaData ( ) constructor = self . _constructorFactory . makeConstructor ( syntheticMetaData . originalConstructor ( ) , syntheticMetaData . syntheticMemberList ( ) , syntheticMetaData . doesConsumeArguments ( ) ) self . _class . __init__ = constructor for syntheticMember in syntheticMetaData . syntheticMemberList ( ) : syntheticMember . apply ( self . _class , syntheticMetaData . originalMemberNameList ( ) , syntheticMetaData . namingConvention ( ) ) if syntheticMetaData . hasEqualityGeneration ( ) : eq = self . _comparisonFactory . makeEqualFunction ( syntheticMetaData . originalEqualFunction ( ) , syntheticMetaData . syntheticMemberList ( ) ) ne = self . _comparisonFactory . makeNotEqualFunction ( syntheticMetaData . originalNotEqualFunction ( ) , syntheticMetaData . syntheticMemberList ( ) ) hashFunc = self . _comparisonFactory . makeHashFunction ( syntheticMetaData . originalHashFunction ( ) , syntheticMetaData . syntheticMemberList ( ) ) self . _class . __eq__ = eq self . _class . __ne__ = ne self . _class . __hash__ = hashFunc
def two_lorentzian(freq, freq0_1, freq0_2, area1, area2, hwhm1, hwhm2,
                   phase1, phase2, offset, drift):
    """A two-Lorentzian model.

    This is simply the sum of two Lorentzian functions in some part of the
    spectrum. Each individual Lorentzian has its own peak frequency, area,
    hwhm and phase, but they share common offset and drift parameters.
    """
    return (lorentzian(freq, freq0_1, area1, hwhm1, phase1, offset, drift) +
            lorentzian(freq, freq0_2, area2, hwhm2, phase2, offset, drift))
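For reference, a plausible sketch of the single-peak lorentzian this sum builds on. The exact form is an assumption (the real helper lives elsewhere in the module and may combine phase, offset and drift differently); the name lorentzian_sketch is hypothetical:

import numpy as np

def lorentzian_sketch(freq, freq0, area, hwhm, phase, offset, drift):
    # absorptive and dispersive components of a Lorentzian centred on freq0
    absorptive = hwhm / (hwhm ** 2 + (freq - freq0) ** 2)
    dispersive = (freq - freq0) / (hwhm ** 2 + (freq - freq0) ** 2)
    # phase mixes the two components, area scales the peak, and offset/drift
    # form the linear baseline shared by both peaks in two_lorentzian
    return (area / np.pi) * (np.cos(phase) * absorptive +
                             np.sin(phase) * dispersive) + offset + drift * freq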
def _extract_methods ( self ) : """Obtains the methods used in the service ."""
service = self . _service all_urls = set ( ) urls_with_options = set ( ) if not service . http : return for rule in service . http . rules : http_method , url = _detect_pattern_option ( rule ) if not url or not http_method or not rule . selector : _logger . error ( u'invalid HTTP binding encountered' ) continue # Obtain the method info method_info = self . _get_or_create_method_info ( rule . selector ) if rule . body : method_info . body_field_path = rule . body if not self . _register ( http_method , url , method_info ) : continue # detected an invalid url all_urls . add ( url ) if http_method == self . _OPTIONS : urls_with_options . add ( url ) self . _add_cors_options_selectors ( all_urls - urls_with_options ) self . _update_usage ( ) self . _update_system_parameters ( )
async def on_isupport_excepts(self, value):
    """Server allows ban exceptions."""
    if not value:
        value = BAN_EXCEPT_MODE
    self._channel_modes.add(value)
    self._channel_modes_behaviour[rfc1459.protocol.BEHAVIOUR_LIST].add(value)
def execution_time(self, value):
    """Force the execution_time to always be a datetime.

    :param value:
    :return:
    """
    if value:
        self._execution_time = parse(value) if isinstance(value, type_check) else value
def read ( self , ** keys ) : """read a data from an ascii table HDU By default , all rows are read . Send rows = to select subsets of the data . Table data are read into a recarray for multiple columns , plain array for a single column . parameters columns : list / array An optional set of columns to read from table HDUs . Can be string or number . If a sequence , a recarray is always returned . If a scalar , an ordinary array is returned . rows : list / array , optional An optional list of rows to read from table HDUS . Default is to read all . vstorage : string , optional Over - ride the default method to store variable length columns . Can be ' fixed ' or ' object ' . See docs on fitsio . FITS for details . lower : bool , optional If True , force all columns names to lower case in output . Will over ride the lower = keyword from construction . upper : bool , optional If True , force all columns names to upper case in output . Will over ride the lower = keyword from construction ."""
rows = keys . get ( 'rows' , None ) columns = keys . get ( 'columns' , None ) # if columns is None , returns all . Guaranteed to be unique and sorted colnums = self . _extract_colnums ( columns ) if isinstance ( colnums , int ) : # scalar sent , don ' t read as a recarray return self . read_column ( columns , ** keys ) rows = self . _extract_rows ( rows ) if rows is None : nrows = self . _info [ 'nrows' ] else : nrows = rows . size # if rows is None still returns None , and is correctly interpreted # by the reader to mean all rows = self . _extract_rows ( rows ) # this is the full dtype for all columns dtype , offsets , isvar = self . get_rec_dtype ( colnums = colnums , ** keys ) array = numpy . zeros ( nrows , dtype = dtype ) # note reading into existing data wnotvar , = numpy . where ( isvar == False ) # noqa if wnotvar . size > 0 : for i in wnotvar : colnum = colnums [ i ] name = array . dtype . names [ i ] a = array [ name ] . copy ( ) self . _FITS . read_column ( self . _ext + 1 , colnum + 1 , a , rows ) array [ name ] = a del a array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) wvar , = numpy . where ( isvar == True ) # noqa if wvar . size > 0 : for i in wvar : colnum = colnums [ i ] name = array . dtype . names [ i ] dlist = self . _FITS . read_var_column_as_list ( self . _ext + 1 , colnum + 1 , rows ) if ( isinstance ( dlist [ 0 ] , str ) or ( IS_PY3 and isinstance ( dlist [ 0 ] , bytes ) ) ) : is_string = True else : is_string = False if array [ name ] . dtype . descr [ 0 ] [ 1 ] [ 1 ] == 'O' : # storing in object array # get references to each , no copy made for irow , item in enumerate ( dlist ) : if IS_PY3 and isinstance ( item , bytes ) : item = item . decode ( 'ascii' ) array [ name ] [ irow ] = item else : for irow , item in enumerate ( dlist ) : if IS_PY3 and isinstance ( item , bytes ) : item = item . decode ( 'ascii' ) if is_string : array [ name ] [ irow ] = item else : ncopy = len ( item ) array [ name ] [ irow ] [ 0 : ncopy ] = item [ : ] lower = keys . get ( 'lower' , False ) upper = keys . get ( 'upper' , False ) if self . lower or lower : _names_to_lower_if_recarray ( array ) elif self . upper or upper : _names_to_upper_if_recarray ( array ) self . _maybe_trim_strings ( array , ** keys ) return array
def run(self):
    """Run install main logic."""
    try:
        if not self._is_rpm_all_lib_include_files_installed():
            self._make_lib_file_symbolic_links()
            self._copy_each_include_files_to_include_dir()
            self._make_dep_lib_file_sym_links_and_copy_include_files()
            self.setup_py.add_patchs_to_build_without_pkg_config(
                self.rpm.lib_dir, self.rpm.include_dir
            )
            self.setup_py.apply_and_save()
        self._build_and_install()
    except InstallError as e:
        if not self._is_rpm_all_lib_include_files_installed():
            org_message = str(e)
            message = '''
Install failed without rpm-devel package by below reason.
Can you install the RPM package, and run this installer again?
'''
            message += org_message
            raise InstallError(message)
        else:
            raise e
def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
    """Splits data into a training, validation, and test set.

    Args:
        X: text data
        y: data labels
        ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)

    Returns:
        split data: X_train, X_val, X_test, y_train, y_val, y_test
    """
    assert (sum(ratio) == 1 and len(ratio) == 3)
    X_train, X_rest, y_train, y_rest = train_test_split(X, y, train_size=ratio[0])
    # the remainder holds ratio[1] + ratio[2] of the samples, so rescale the
    # validation share relative to that remainder
    X_val, X_test, y_val, y_test = train_test_split(
        X_rest, y_rest, train_size=ratio[1] / (ratio[1] + ratio[2]))
    return X_train, X_val, X_test, y_train, y_val, y_test
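A quick usage sketch with made-up data (assumes scikit-learn's train_test_split is imported at module level, as the body implies):

import numpy as np
from sklearn.model_selection import train_test_split  # assumed module-level import

X = np.arange(100).reshape(-1, 1)  # stand-in for text features
y = np.arange(100) % 2             # stand-in labels

X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
print(len(X_train), len(X_val), len(X_test))  # roughly 80 / 10 / 10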
def ReadClientFullInfo(self, client_id):
    """Reads full client information for a single client.

    Args:
        client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".

    Returns:
        A `ClientFullInfo` instance for given client.

    Raises:
        UnknownClientError: if no client with such id was found.
    """
    result = self.MultiReadClientFullInfo([client_id])
    try:
        return result[client_id]
    except KeyError:
        raise UnknownClientError(client_id)
def _makeKey(self, usern):
    """Make a new, probably unique key.

    This key will be sent in an email to the user and is used to access the
    password change form.
    """
    return unicode(hashlib.md5(str((usern, time.time(), random.random()))).hexdigest())
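The line above is Python 2 code (unicode, md5 over a str). A hedged Python 3 equivalent could look like the sketch below; _make_key_py3 is a hypothetical name, not part of the original module:

import hashlib
import random
import time

def _make_key_py3(usern):
    # same idea: hash the username, current time and a random draw;
    # md5 needs bytes in Python 3, hence the explicit encode()
    raw = str((usern, time.time(), random.random())).encode('utf-8')
    return hashlib.md5(raw).hexdigest()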
def TR ( self , ** kwargs ) : # pragma : no cover """NAME : TR PURPOSE : Calculate the radial period for a power - law rotation curve INPUT : scipy . integrate . quadrature keywords OUTPUT : T _ R ( R , vT , vT ) * vc / ro + estimate of the error HISTORY : 2010-12-01 - Written - Bovy ( NYU )"""
if hasattr ( self , '_TR' ) : return self . _TR ( rperi , rap ) = self . calcRapRperi ( ** kwargs ) if nu . fabs ( rap - rperi ) / rap < 10. ** - 4. : # Rough limit self . _TR = 2. * m . pi / epifreq ( self . _pot , self . _R , use_physical = False ) return self . _TR Rmean = m . exp ( ( m . log ( rperi ) + m . log ( rap ) ) / 2. ) EL = self . calcEL ( ** kwargs ) E , L = EL TR = 0. if Rmean > rperi : TR += integrate . quadrature ( _TRAxiIntegrandSmall , 0. , m . sqrt ( Rmean - rperi ) , args = ( E , L , self . _pot , rperi ) , ** kwargs ) [ 0 ] if Rmean < rap : TR += integrate . quadrature ( _TRAxiIntegrandLarge , 0. , m . sqrt ( rap - Rmean ) , args = ( E , L , self . _pot , rap ) , ** kwargs ) [ 0 ] self . _TR = 2. * TR return self . _TR
def write(self, data):
    '''Write method used by internal tarfile instance to output data.

    This method blocks tarfile execution once the internal buffer is full.
    As this method is blocking, it is used inside the same thread as
    :meth:`fill`.

    :param data: bytes to write to internal buffer
    :type data: bytes
    :returns: number of bytes written
    :rtype: int
    '''
    self._add.wait()
    self._data += data
    if len(self._data) > self._want:
        self._add.clear()
        self._result.set()
    return len(data)
def add_forward_workflow ( self , dag , sections , satisfies = None ) : '''Add a forward - workflow , return number of nodes added'''
dag . new_forward_workflow ( ) if 'DAG' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] : env . log_to_file ( 'DAG' , f'Adding mini-workflow with {len(sections)} sections' ) default_input : sos_targets = sos_targets ( [ ] ) for idx , section in enumerate ( sections ) : res = analyze_section ( section , default_input = default_input ) environ_vars = res [ 'environ_vars' ] signature_vars = res [ 'signature_vars' ] changed_vars = res [ 'changed_vars' ] # parameters , if used in the step , should be considered environmental environ_vars |= env . parameter_vars & signature_vars # add shared to targets if res [ 'changed_vars' ] : if 'provides' in section . options : if isinstance ( section . options [ 'provides' ] , str ) : section . options . set ( 'provides' , [ section . options [ 'provides' ] ] ) else : section . options . set ( 'provides' , [ ] ) section . options . set ( 'provides' , section . options [ 'provides' ] + [ sos_variable ( var ) for var in changed_vars ] ) context = { '__signature_vars__' : signature_vars , '__environ_vars__' : environ_vars , '__changed_vars__' : changed_vars , '__dynamic_depends__' : res [ 'dynamic_depends' ] , '__dynamic_input__' : res [ 'dynamic_input' ] } # for nested workflow , the input is specified by sos _ run , not None . if idx == 0 : context [ '__step_output__' ] = env . sos_dict [ '__step_output__' ] # can be the only step if idx == len ( sections ) - 1 and satisfies is not None : res [ 'step_output' ] . extend ( satisfies ) dag . add_step ( section . uuid , section . step_name ( ) , idx , res [ 'step_input' ] , res [ 'step_depends' ] , res [ 'step_output' ] , context = context ) default_input = res [ 'step_output' ] return len ( sections )
def _find(expr, sub, start=0, end=None):
    """Return the lowest index in each string of the sequence or scalar where
    the substring is fully contained between [start:end]. Return -1 on
    failure. Equivalent to standard str.find().

    :param expr:
    :param sub: substring being searched
    :param start: left edge index
    :param end: right edge index
    :return: sequence or scalar
    """
    return _string_op(expr, Find, output_type=types.int64, _sub=sub, _start=start, _end=end)
def add_send_last_message(self, connection, send_last_message):
    """Adds a send_last_message function to the Dispatcher's dictionary of
    functions indexed by connection.

    Args:
        connection (str): A locally unique identifier provided by the
            receiver of messages.
        send_last_message (fn): The method that should be called by the
            dispatcher to respond to messages which arrive via connection,
            when the connection should be closed after the message has been
            sent.
    """
    self._send_last_message[connection] = send_last_message
    LOGGER.debug("Added send_last_message function "
                 "for connection %s", connection)
def possible_public_pairs_for_signature ( self , value , signature , y_parity = None ) : """: param : value : an integer value : param : signature : an ` ` ( r , s ) ` ` pair of integers representing an ecdsa signature of ` ` value ` ` : param : y _ parity : ( optional ) for a given value and signature , there are either two points that sign it , or none if the signature is invalid . One of the points has an even y value , the other an odd . If this parameter is set , only points whose y value matches this value will be returned in the list . : return : a list of : class : ` Point < pycoin . ecdsa . Point . Point > ` objects p where each p is a possible public key for which ` ` signature ` ` correctly signs the given ` ` value ` ` . If something goes wrong , this list will be empty ."""
r , s = signature try : points = self . points_for_x ( r ) except ValueError : return [ ] if y_parity is not None : if y_parity & 1 : points = points [ 1 : ] else : points = points [ : 1 ] inv_r = self . inverse ( r ) s_over_r = s * inv_r minus_E_over_r = - ( inv_r * value ) * self try : return [ s_over_r * p + minus_E_over_r for p in points ] except ValueError : return [ ]
def _error_dm(self, m, dm, s):
    """Error function.

    Once self.goal has been defined, compute the error of the input using
    the generalized forward model.
    """
    pred = self.fmodel.predict_given_context(np.hstack((m, dm)), s, range(len(s)))
    err_v = pred - self.goal
    error = sum(e * e for e in err_v)
    return error
def get_start_time_str(self):
    """
    :return: |attr_start_datetime| as |str| formatted with
        |attr_start_time_format|. Return |NaT| if the value or the format
        is invalid.
    :rtype: str

    :Sample Code:
        .. code:: python

            from datetimerange import DateTimeRange
            time_range = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
            print(time_range.get_start_time_str())
            time_range.start_time_format = "%Y/%m/%d %H:%M:%S"
            print(time_range.get_start_time_str())
    :Output:
        .. parsed-literal::

            2015-03-22T10:00:00+0900
            2015/03/22 10:00:00
    """
    try:
        return self.start_datetime.strftime(self.start_time_format)
    except AttributeError:
        return self.NOT_A_TIME_STR
def linspace2(a, b, n, dtype=None):
    """Similar to numpy.linspace, but excluding the boundaries.

    This is the normal numpy.linspace:

        >>> print linspace(0, 1, 5)
        [ 0.    0.25  0.5   0.75  1.  ]

    and this excludes the boundaries:

        >>> print linspace2(0, 1, 5)
        [ 0.1  0.3  0.5  0.7  0.9]
    """
    a = linspace(a, b, n + 1, dtype=dtype)[:-1]
    if len(a) > 1:
        diff01 = ((a[1] - a[0]) / 2).astype(a.dtype)
        a += diff01
    return a
def listdict_to_listlist_and_matrix(sparse):
    """Transforms the adjacency list representation of a graph of type
    listdict into the listlist + weight matrix representation.

    :param sparse: graph in listdict representation
    :returns: couple with listlist representation, and weight matrix
    :complexity: linear
    """
    V = range(len(sparse))
    graph = [[] for _ in V]
    weight = [[None for v in V] for u in V]
    for u in V:
        for v in sparse[u]:
            graph[u].append(v)
            weight[u][v] = sparse[u][v]
    return graph, weight
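A small worked example with a made-up graph:

# 3-vertex graph in listdict form: sparse[u] maps neighbour v -> edge weight
sparse = [{1: 5}, {0: 5, 2: 2}, {}]

graph, weight = listdict_to_listlist_and_matrix(sparse)
print(graph)         # [[1], [0, 2], []]
print(weight[1][2])  # 2
print(weight[0][2])  # None (no edge)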
def return_file_objects(connection, container, prefix='database'):
    """Given a connection and container, find database dumps."""
    options = []

    meta_data = objectstore.get_full_container_list(connection, container, prefix='database')

    env = ENV.upper()

    for o_info in meta_data:
        expected_file = f'database.{ENV}'
        if o_info['name'].startswith(expected_file):
            dt = dateparser.parse(o_info['last_modified'])
            now = datetime.datetime.now()
            delta = now - dt
            LOG.debug('AGE: %d %s', delta.days, expected_file)
            options.append((dt, o_info))

    options.sort()
    return options
def on_add_vrf_conf(self, evt):
    """Event handler for new VrfConf.

    Creates a VrfTable to store routing information related to the new Vrf.
    Also arranges for related paths to be imported to this VrfTable.
    """
    vrf_conf = evt.value
    route_family = vrf_conf.route_family
    assert route_family in vrfs.SUPPORTED_VRF_RF
    # Create VRF table with given configuration.
    vrf_table = self._table_manager.create_and_link_vrf_table(vrf_conf)
    # Attach VrfConf change listeners.
    vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT, self.on_stats_config_change)
    vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT, self.on_stats_config_change)
    vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
    # Import paths from VPN table that match this VRF/VPN.
    self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
    # Update local RT NLRIs
    self._rt_manager.update_local_rt_nlris()
    self._signal_bus.vrf_added(vrf_conf)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
    """``zip`` the input *iterables* together, but offset the `i`-th iterable
    by the `i`-th item in *offsets*.

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]

    This can be used as a lightweight alternative to SciPy or pandas to
    analyze data sets in which some series have a lead or lag relationship.

    By default, the sequence will end when the shortest iterable is
    exhausted. To continue until the longest iterable is exhausted, set
    *longest* to ``True``.

        >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
        [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]

    By default, ``None`` will be used to replace offsets beyond the end of
    the sequence. Specify *fillvalue* to use some other value.
    """
    if len(iterables) != len(offsets):
        raise ValueError("Number of iterables and offsets didn't match")

    staggered = []
    for it, n in zip(iterables, offsets):
        if n < 0:
            staggered.append(chain(repeat(fillvalue, -n), it))
        elif n > 0:
            staggered.append(islice(it, n, None))
        else:
            staggered.append(it)

    if longest:
        return zip_longest(*staggered, fillvalue=fillvalue)

    return zip(*staggered)
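One case the docstring does not show: a negative offset pads the front of that iterable with fillvalue (the body relies on chain, islice, repeat and zip_longest from itertools):

from itertools import chain, islice, repeat  # module-level imports the body relies on

print(list(zip_offset('abc', 'xyz', offsets=(0, -1), fillvalue='-')))
# [('a', '-'), ('b', 'x'), ('c', 'y')]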
def seek(self, pos=0):
    """Set the stream's file pointer to pos. Negative seeking is forbidden."""
    if pos - self.pos >= 0:
        blocks, remainder = divmod(pos - self.pos, self.bufsize)
        for i in range(blocks):
            self.read(self.bufsize)
        self.read(remainder)
    else:
        raise StreamError("seeking backwards is not allowed")
    return self.pos
def insert_column(self, data_array, ckey='temp', index=None):
    """This will insert/overwrite a new column and fill it with the data from
    the supplied array.

    Parameters
    ----------
    data_array
        Data; can be a list, but will be converted to a numpy array.
    ckey
        Name of the column; if an integer is supplied, uses self.ckeys[ckey].
    index
        Where to insert this column. None => append to end.
    """
    # if it's an integer, use the ckey from the list
    if isinstance(ckey, int):
        ckey = self.ckeys[ckey]

    # append/overwrite the column value
    self.columns[ckey] = _n.array(data_array)
    if ckey not in self.ckeys:
        if index is None:
            self.ckeys.append(ckey)
        else:
            self.ckeys.insert(index, ckey)
    return self
def list_conversions(api_key, api_secret, video_key, **kwargs):
    """Function which retrieves a list of a video object's conversions.

    :param api_key: <string> JWPlatform api-key
    :param api_secret: <string> JWPlatform shared-secret
    :param video_key: <string> Video's object ID. Can be found within
        JWPlayer Dashboard.
    :param kwargs: Arguments conforming to standards found @
        https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/conversions/list.html
    :return: <dict> Dict which represents the JSON response.
    """
    jwplatform_client = jwplatform.Client(api_key, api_secret)
    logging.info("Querying for video conversions.")
    try:
        response = jwplatform_client.videos.conversions.list(video_key=video_key, **kwargs)
    except jwplatform.errors.JWPlatformError as e:
        logging.error("Encountered an error querying for video conversions.\n{}".format(e))
        sys.exit(e.message)
    return response
def get_tdata ( t_format , files ) : """Get the time information from file names Parameters t _ format : str The string that can be used to get the time information in the files . Any numeric datetime format string ( e . g . % Y , % m , % H ) can be used , but not non - numeric strings like % b , etc . See [ 1 ] _ for the datetime format strings files : list of str The that contain the time informations Returns pandas . Index The time coordinate list of str The file names as they are sorten in the returned index References . . [ 1 ] https : / / docs . python . org / 2 / library / datetime . html"""
def median ( arr ) : return arr . min ( ) + ( arr . max ( ) - arr . min ( ) ) / 2 import re from pandas import Index t_pattern = t_format for fmt , patt in t_patterns . items ( ) : t_pattern = t_pattern . replace ( fmt , patt ) t_pattern = re . compile ( t_pattern ) time = list ( range ( len ( files ) ) ) for i , f in enumerate ( files ) : time [ i ] = median ( np . array ( list ( map ( lambda s : np . datetime64 ( dt . datetime . strptime ( s , t_format ) ) , t_pattern . findall ( f ) ) ) ) ) ind = np . argsort ( time ) # sort according to time files = np . array ( files ) [ ind ] time = np . array ( time ) [ ind ] return to_datetime ( Index ( time , name = 'time' ) ) , files
def get_sigma_mu_adjustment ( self , C , imt , rup , dists ) : """Returns the sigma mu adjustment factor"""
if imt . name in "PGA PGV" : # PGA and PGV are 2D arrays of dimension [ nmags , ndists ] sigma_mu = getattr ( self , imt . name . lower ( ) ) if rup . mag <= self . mags [ 0 ] : sigma_mu_m = sigma_mu [ 0 , : ] elif rup . mag >= self . mags [ - 1 ] : sigma_mu_m = sigma_mu [ - 1 , : ] else : intpl1 = interp1d ( self . mags , sigma_mu , axis = 0 ) sigma_mu_m = intpl1 ( rup . mag ) # Linear interpolation with distance intpl2 = interp1d ( self . dists , sigma_mu_m , bounds_error = False , fill_value = ( sigma_mu_m [ 0 ] , sigma_mu_m [ - 1 ] ) ) return intpl2 ( dists . rjb ) # In the case of SA the array is of dimension [ nmags , ndists , nperiods ] # Get values for given magnitude if rup . mag <= self . mags [ 0 ] : sigma_mu_m = self . s_a [ 0 , : , : ] elif rup . mag >= self . mags [ - 1 ] : sigma_mu_m = self . s_a [ - 1 , : , : ] else : intpl1 = interp1d ( self . mags , self . s_a , axis = 0 ) sigma_mu_m = intpl1 ( rup . mag ) # Get values for period - N . B . ln T , linear sigma mu interpolation if imt . period <= self . periods [ 0 ] : sigma_mu_t = sigma_mu_m [ : , 0 ] elif imt . period >= self . periods [ - 1 ] : sigma_mu_t = sigma_mu_m [ : , - 1 ] else : intpl2 = interp1d ( np . log ( self . periods ) , sigma_mu_m , axis = 1 ) sigma_mu_t = intpl2 ( np . log ( imt . period ) ) intpl3 = interp1d ( self . dists , sigma_mu_t , bounds_error = False , fill_value = ( sigma_mu_t [ 0 ] , sigma_mu_t [ - 1 ] ) ) return intpl3 ( dists . rjb )
def get ( self , path ) : """get renders the notebook template if a name is given , or redirects to the ' / files / ' handler if the name is not given ."""
path = path . strip ( '/' ) self . log . info ( 'Appmode get: %s' , path ) # Abort if the app path is not below configured trusted _ path . if not path . startswith ( self . trusted_path ) : self . log . warn ( 'Appmode refused to launch %s outside trusted path %s.' , path , self . trusted_path ) raise web . HTTPError ( 401 , 'Notebook is not within trusted Appmode path.' ) cm = self . contents_manager # will raise 404 on not found try : model = cm . get ( path , content = False ) except web . HTTPError as e : if e . status_code == 404 and 'files' in path . split ( '/' ) : # 404 , but ' / files / ' in URL , let FilesRedirect take care of it return FilesRedirectHandler . redirect_to_files ( self , path ) else : raise if model [ 'type' ] != 'notebook' : # not a notebook , redirect to files return FilesRedirectHandler . redirect_to_files ( self , path ) # fix back button navigation self . add_header ( "Cache-Control" , "cache-control: private, max-age=0, no-cache, no-store" ) # gather template parameters tmp_path = self . mk_tmp_copy ( path ) tmp_name = tmp_path . rsplit ( '/' , 1 ) [ - 1 ] render_kwargs = { 'notebook_path' : tmp_path , 'notebook_name' : tmp_name , 'kill_kernel' : False , 'mathjax_url' : self . mathjax_url , 'mathjax_config' : self . mathjax_config , 'show_edit_button' : self . show_edit_button , 'show_other_buttons' : self . show_other_buttons , } # template parameters changed over time if hasattr ( orig_handler , "get_custom_frontend_exporters" ) : get_cfw = orig_handler . get_custom_frontend_exporters render_kwargs [ 'get_custom_frontend_exporters' ] = get_cfw # Ok let ' s roll . . . . self . write ( self . render_template ( 'appmode.html' , ** render_kwargs ) )
def delete_asset(self):
    """Delete asset from the release.

    :rtype: bool
    """
    headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
    return True
def load_js(js_url=None, version='5.2.0'):
    """Load Dropzone's js resources with given version.

    .. versionadded:: 1.4.4

    :param js_url: The JS url for Dropzone.js.
    :param version: The version of Dropzone.js.
    """
    js_filename = 'dropzone.min.js'
    serve_local = current_app.config['DROPZONE_SERVE_LOCAL']

    if serve_local:
        js = '<script src="%s"></script>\n' % url_for('dropzone.static', filename=js_filename)
    else:
        js = '<script src="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/%s"></script>\n' % (version, js_filename)

    if js_url:
        js = '<script src="%s"></script>\n' % js_url
    return Markup(js)
def step_next(self):
    """Go to the next step."""
    window_start = around(self.parent.value('window_start') +
                          self.parent.value('window_length') /
                          self.parent.value('window_step'), 2)
    self.parent.overview.update_position(window_start)
def new(cls, username, password=None, email=None, first_name="", last_name="",
        login_method=None, role="MEMBER"):
    """Create a new user.

    :param username: str
    :param password: str
    :param email: str
    :param first_name: str
    :param last_name: str
    :param login_method: str
    :param role: str
    :return: AuthUser
    """
data = { "first_name" : first_name , "last_name" : last_name , "email" : email } if not password : password = utils . generate_random_string ( ) username = username . strip ( ) . lower ( ) if "@" in username and not email : if not utils . is_email_valid ( username ) : exceptions . AuthError ( _ ( "Invalid username" ) ) data [ "email" ] = username elif email : if not utils . is_email_valid ( email ) : exceptions . AuthError ( _ ( "Invalid username" ) ) if not utils . is_username_valid ( username ) : exceptions . AuthError ( _ ( "Invalid username" ) ) if not utils . is_password_valid ( password ) : raise exceptions . AuthError ( _ ( "Password is invalid" ) ) if cls . get_by_username ( username ) : raise exceptions . AuthError ( _ ( "Username exists already" ) ) _email = data . get ( "email" ) if _email : if cls . get_by_email ( _email ) : raise exceptions . AuthError ( _ ( "Email address exists already" ) ) role = AuthUserRole . get_by_name ( role or "MEMBER" ) if not role : raise exceptions . AuthError ( _ ( "Invalid user role" ) ) data . update ( { "username" : username , "password_hash" : cls . encrypt_password ( password ) , "email_verified" : False , "login_method" : login_method , "role" : role , "status" : cls . STATUS_ACTIVE } ) user = cls . create ( ** data ) user . reset_secret_key ( ) return user
def readPrefs_dms_tools_format ( f ) : """Reads the amino - acid preferences written by ` dms _ tools v1 < http : / / jbloomlab . github . io / dms _ tools / > ` _ . This is an exact copy of the same code from ` dms _ tools . file _ io . ReadPreferences ` . It is copied because ` dms _ tools v1 < http : / / jbloomlab . github . io / dms _ tools / > ` _ is currently only compatible with ` python2 ` , and we needed something that also works with ` python3 ` . * f * is the name of an existing file or a readable file - like object . It should be in the format written by ` dms _ tools v1 < http : / / jbloomlab . github . io / dms _ tools / > ` _ . The return value is the tuple : * ( sites , wts , pi _ means , pi _ 95credint , h ) * where * sites * , * wts * , * pi _ means * , and * pi _ 95credint * will all have the same values used to write the file with * WritePreferences * , and * h * is a dictionary with * h [ r ] * giving the site entropy ( log base 2 ) for each * r * in * sites * ."""
charmatch = re . compile ( '^PI_([A-z\*\-]+)$' ) if isinstance ( f , str ) : f = open ( f ) lines = f . readlines ( ) f . close ( ) else : lines = f . readlines ( ) characters = [ ] sites = [ ] wts = { } pi_means = { } pi_95credint = { } h = { } for line in lines : if line . isspace ( ) : continue elif line [ 0 ] == '#' and not characters : entries = line [ 1 : ] . strip ( ) . split ( ) if len ( entries ) < 4 : raise ValueError ( "Insufficient entries in header:\n%s" % line ) if not ( entries [ 0 ] in [ 'POSITION' , 'SITE' ] and entries [ 1 ] [ : 2 ] == 'WT' and entries [ 2 ] == 'SITE_ENTROPY' ) : raise ValueError ( "Not the correct first three header columns:\n%s" % line ) i = 3 while i < len ( entries ) and charmatch . search ( entries [ i ] ) : characters . append ( charmatch . search ( entries [ i ] ) . group ( 1 ) ) i += 1 if i == len ( entries ) : pi_95credint = None linelength = len ( characters ) + 3 else : if not len ( entries ) - i == len ( characters ) : raise ValueError ( "Header line does not have valid credible interval format:\n%s" % line ) if not all ( [ entries [ i + j ] == 'PI_%s_95' % characters [ j ] for j in range ( len ( characters ) ) ] ) : raise ValueError ( "mean and credible interval character mismatch in header:\n%s" % line ) linelength = 2 * len ( characters ) + 3 elif line [ 0 ] == '#' : continue elif not characters : raise ValueError ( "Found data lines before encountering a valid header" ) else : entries = line . strip ( ) . split ( ) if len ( entries ) != linelength : raise ValueError ( "Line does not have expected %d entries:\n%s" % ( linelength , line ) ) r = entries [ 0 ] assert r not in sites , "Duplicate site of %s" % r sites . append ( r ) wts [ r ] = entries [ 1 ] assert entries [ 1 ] in characters or entries [ 1 ] == '?' , "Character %s is not one of the valid ones in header. Valid possibilities: %s" % ( entries [ 1 ] , ', ' . join ( characters ) ) h [ r ] = float ( entries [ 2 ] ) pi_means [ r ] = dict ( [ ( x , float ( entries [ 3 + i ] ) ) for ( i , x ) in enumerate ( characters ) ] ) if pi_95credint != None : pi_95credint [ r ] = dict ( [ ( x , ( float ( entries [ 3 + len ( characters ) + i ] . split ( ',' ) [ 0 ] ) , float ( entries [ 3 + len ( characters ) + i ] . split ( ',' ) [ 1 ] ) ) ) for ( i , x ) in enumerate ( characters ) ] ) return ( sites , wts , pi_means , pi_95credint , h )
def get_generator ( tweet ) : """Get information about the application that generated the Tweet Args : tweet ( Tweet ) : A Tweet object ( or a dictionary ) Returns : dict : keys are ' link ' and ' name ' , the web link and the name of the application Example : > > > from tweet _ parser . getter _ methods . tweet _ generator import get _ generator > > > original _ format _ dict = { . . . " created _ at " : " Wed May 24 20:17:19 + 0000 2017 " , . . . " source " : ' < a href = " http : / / twitter . com " rel = " nofollow " > Twitter Web Client < / a > ' > > > get _ generator ( original _ format _ dict ) { ' link ' : ' http : / / twitter . com ' , ' name ' : ' Twitter Web Client ' } > > > activity _ streams _ format _ dict = { . . . " postedTime " : " 2017-05-24T20:17:19.000Z " , . . . " generator " : . . . { " link " : " http : / / twitter . com " , . . . " displayName " : " Twitter Web Client " } > > > get _ generator ( activity _ streams _ format _ dict ) { ' link ' : ' http : / / twitter . com ' , ' name ' : ' Twitter Web Client ' }"""
if is_original_format ( tweet ) : if sys . version_info [ 0 ] == 3 and sys . version_info [ 1 ] >= 4 : parser = GeneratorHTMLParser ( convert_charrefs = True ) else : parser = GeneratorHTMLParser ( ) parser . feed ( tweet [ "source" ] ) return { "link" : parser . generator_link , "name" : parser . generator_name } else : return { "link" : tweet [ "generator" ] [ "link" ] , "name" : tweet [ "generator" ] [ "displayName" ] }
def uhash ( self , val ) : """Calculate hash from unicode value and return hex value as unicode"""
if not isinstance ( val , string_types ) : raise _TypeError ( "val" , "str" , val ) return codecs . encode ( self . hash ( val . encode ( "utf-8" ) ) , "hex_codec" ) . decode ( "utf-8" )
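The same unicode-to-UTF-8-to-hex pattern restated as a self-contained sketch, with SHA-256 standing in for whatever self.hash is configured to use (an assumption).
import codecs
import hashlib

def uhash_sketch(val: str) -> str:
    # encode to UTF-8 first so the digest does not depend on platform encoding
    digest = hashlib.sha256(val.encode("utf-8")).digest()
    return codecs.encode(digest, "hex_codec").decode("utf-8")

print(uhash_sketch("café"))  # lowercase hex string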
def round_to_x_digits ( number , digits ) : """Returns ' number ' rounded to ' digits ' digits ."""
return round ( number * math . pow ( 10 , digits ) ) / math . pow ( 10 , digits )
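Restated as a self-contained sketch so it runs on its own; for positive digits the result matches the built-in round(number, digits) up to the usual binary floating-point caveats.
import math

def round_to_x_digits(number, digits):
    # scale up, round to the nearest integer, scale back down
    return round(number * math.pow(10, digits)) / math.pow(10, digits)

print(round_to_x_digits(3.14159, 2))  # 3.14
print(round_to_x_digits(2.71828, 3))  # 2.718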
def all_origins ( m ) : '''Generate all unique statement origins in the given model'''
seen = set ( ) for link in m . match ( ) : origin = link [ ORIGIN ] if origin not in seen : seen . add ( origin ) yield origin
def override ( func ) : """THIS DECORATOR WILL PUT ALL PARAMETERS INTO THE ` kwargs ` PARAMETER AND THEN PUT ALL ` kwargs ` PARAMETERS INTO THE FUNCTION PARAMETERS . THIS HAS THE BENEFIT OF HAVING ALL PARAMETERS IN ONE PLACE ( kwargs ) , PLUS ALL PARAMETERS ARE EXPLICIT FOR CLARITY . OF COURSE , THIS MEANS PARAMETER ASSIGNMENT MAY NOT BE UNIQUE : VALUES CAN COME FROM EXPLICIT CALL PARAMETERS , OR FROM THE kwargs PARAMETER . IN THESE CASES , PARAMETER VALUES ARE CHOSEN IN THE FOLLOWING ORDER : 1 ) EXPLICT CALL PARAMETERS 2 ) PARAMETERS FOUND IN kwargs 3 ) DEFAULT VALUES ASSIGNED IN FUNCTION DEFINITION"""
func_name = get_function_name ( func ) params = get_function_arguments ( func ) if not get_function_defaults ( func ) : defaults = { } else : defaults = { k : v for k , v in zip ( reversed ( params ) , reversed ( get_function_defaults ( func ) ) ) } def raise_error ( e , packed ) : err = text_type ( e ) e = Except . wrap ( e ) if err . startswith ( func_name ) and ( "takes at least" in err or "required positional argument" in err ) : missing = [ p for p in params if str ( p ) not in packed ] given = [ p for p in params if str ( p ) in packed ] if not missing : raise e else : get_logger ( ) . error ( "Problem calling {{func_name}}: Expecting parameter {{missing}}, given {{given}}" , func_name = func_name , missing = missing , given = given , stack_depth = 2 , cause = e ) raise e if "kwargs" not in params : # WE ASSUME WE ARE ONLY ADDING A kwargs PARAMETER TO SOME REGULAR METHOD def wo_kwargs ( * args , ** kwargs ) : settings = kwargs . get ( "kwargs" ) ordered_params = dict ( zip ( params , args ) ) packed = params_pack ( params , ordered_params , kwargs , settings , defaults ) try : return func ( ** packed ) except TypeError as e : raise_error ( e , packed ) return wo_kwargs elif func_name in ( "__init__" , "__new__" ) : def w_constructor ( * args , ** kwargs ) : if "kwargs" in kwargs : packed = params_pack ( params , dict_zip ( params [ 1 : ] , args [ 1 : ] ) , kwargs , kwargs [ "kwargs" ] , defaults ) elif len ( args ) == 2 and len ( kwargs ) == 0 and is_data ( args [ 1 ] ) : # ASSUME SECOND UNNAMED PARAM IS kwargs packed = params_pack ( params , args [ 1 ] , defaults ) else : # DO NOT INCLUDE self IN kwargs packed = params_pack ( params , dict_zip ( params [ 1 : ] , args [ 1 : ] ) , kwargs , defaults ) try : return func ( args [ 0 ] , ** packed ) except TypeError as e : packed [ 'self' ] = args [ 0 ] # DO NOT SAY IS MISSING raise_error ( e , packed ) return w_constructor elif params [ 0 ] == "self" : def w_bound_method ( * args , ** kwargs ) : if len ( args ) == 2 and len ( kwargs ) == 0 and is_data ( args [ 1 ] ) : # ASSUME SECOND UNNAMED PARAM IS kwargs packed = params_pack ( params , args [ 1 ] , defaults ) elif "kwargs" in kwargs and is_data ( kwargs [ "kwargs" ] ) : # PUT args INTO kwargs packed = params_pack ( params , kwargs , dict_zip ( params [ 1 : ] , args [ 1 : ] ) , kwargs [ "kwargs" ] , defaults ) else : packed = params_pack ( params , kwargs , dict_zip ( params [ 1 : ] , args [ 1 : ] ) , defaults ) try : return func ( args [ 0 ] , ** packed ) except TypeError as e : raise_error ( e , packed ) return w_bound_method else : def w_kwargs ( * args , ** kwargs ) : if len ( args ) == 1 and len ( kwargs ) == 0 and is_data ( args [ 0 ] ) : # ASSUME SINGLE PARAMETER IS kwargs packed = params_pack ( params , args [ 0 ] , defaults ) elif "kwargs" in kwargs and is_data ( kwargs [ "kwargs" ] ) : # PUT args INTO kwargs packed = params_pack ( params , kwargs , dict_zip ( params , args ) , kwargs [ "kwargs" ] , defaults ) else : # PULL kwargs OUT INTO PARAMS packed = params_pack ( params , kwargs , dict_zip ( params , args ) , defaults ) try : return func ( ** packed ) except TypeError as e : raise_error ( e , packed ) return w_kwargs
def delete_topics ( self , topics , timeout_ms = None ) : """Delete topics from the cluster . : param topics : A list of topic name strings . : param timeout _ ms : Milliseconds to wait for topics to be deleted before the broker returns . : return : Appropriate version of DeleteTopicsResponse class ."""
version = self . _matching_api_version ( DeleteTopicsRequest ) timeout_ms = self . _validate_timeout ( timeout_ms ) if version <= 1 : request = DeleteTopicsRequest [ version ] ( topics = topics , timeout = timeout_ms ) response = self . _send_request_to_controller ( request ) else : raise NotImplementedError ( "Support for DeleteTopics v{} has not yet been added to KafkaAdminClient." . format ( version ) ) return response
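A hedged usage sketch, assuming this method lives on kafka-python's KafkaAdminClient (as the NotImplementedError message above suggests); the broker address and topic name are placeholders.
from kafka.admin import KafkaAdminClient

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")  # placeholder broker
response = admin.delete_topics(["obsolete-topic"], timeout_ms=30000)
print(response)  # DeleteTopicsResponse carrying a per-topic error code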
def set_weather_from_metar ( metar : typing . Union [ Metar . Metar , str ] , in_file : typing . Union [ str , Path ] , out_file : typing . Union [ str , Path ] = None ) -> typing . Tuple [ typing . Union [ str , None ] , typing . Union [ str , None ] ] : """Applies the weather from a METAR object to a MIZ file Args : metar : metar object in _ file : path to MIZ file out _ file : path to output MIZ file ( will default to in _ file ) Returns : tuple of error , success"""
error , metar = custom_metar . CustomMetar . get_metar ( metar ) if error : return error , None if metar : LOGGER . debug ( 'METAR: %s' , metar . code ) in_file = elib . path . ensure_file ( in_file ) if out_file is None : out_file = in_file else : out_file = elib . path . ensure_file ( out_file , must_exist = False ) LOGGER . debug ( 'applying metar: %s -> %s' , in_file , out_file ) try : LOGGER . debug ( 'building MissionWeather' ) _mission_weather = mission_weather . MissionWeather ( metar ) with Miz ( str ( in_file ) ) as miz : _mission_weather . apply_to_miz ( miz ) miz . zip ( str ( out_file ) ) return None , f'successfully applied METAR to {in_file}' except ValueError : error = f'Unable to apply METAR string to the mission.\n' f'This is most likely due to a freak value, this feature is still experimental.\n' f'I will fix it ASAP !' return error , None
def property_as_list ( self , property_name ) : """property ( ) but encapsulates it in a list , if it ' s a single - element property ."""
try : res = self . _a_tags [ property_name ] except KeyError : return [ ] if type ( res ) == list : return res else : return [ res ]
def from_arrays ( cls , arrays , sortorder = None , names = None ) : """Convert arrays to MultiIndex . Parameters arrays : list / sequence of array - likes Each array - like gives one level ' s value for each data point . len ( arrays ) is the number of levels . sortorder : int or None Level of sortedness ( must be lexicographically sorted by that level ) . names : list / sequence of str , optional Names for the levels in the index . Returns index : MultiIndex See Also MultiIndex . from _ tuples : Convert list of tuples to MultiIndex . MultiIndex . from _ product : Make a MultiIndex from cartesian product of iterables . MultiIndex . from _ frame : Make a MultiIndex from a DataFrame . Examples > > > arrays = [ [ 1 , 1 , 2 , 2 ] , [ ' red ' , ' blue ' , ' red ' , ' blue ' ] ] > > > pd . MultiIndex . from _ arrays ( arrays , names = ( ' number ' , ' color ' ) ) MultiIndex ( levels = [ [ 1 , 2 ] , [ ' blue ' , ' red ' ] ] , codes = [ [ 0 , 0 , 1 , 1 ] , [ 1 , 0 , 1 , 0 ] ] , names = [ ' number ' , ' color ' ] )"""
error_msg = "Input must be a list / sequence of array-likes." if not is_list_like ( arrays ) : raise TypeError ( error_msg ) elif is_iterator ( arrays ) : arrays = list ( arrays ) # Check if elements of array are list - like for array in arrays : if not is_list_like ( array ) : raise TypeError ( error_msg ) # Check if lengths of all arrays are equal or not , # raise ValueError , if not for i in range ( 1 , len ( arrays ) ) : if len ( arrays [ i ] ) != len ( arrays [ i - 1 ] ) : raise ValueError ( 'all arrays must be same length' ) from pandas . core . arrays . categorical import _factorize_from_iterables codes , levels = _factorize_from_iterables ( arrays ) if names is None : names = [ getattr ( arr , "name" , None ) for arr in arrays ] return MultiIndex ( levels = levels , codes = codes , sortorder = sortorder , names = names , verify_integrity = False )
def power_method_opnorm ( op , xstart = None , maxiter = 100 , rtol = 1e-05 , atol = 1e-08 , callback = None ) : r"""Estimate the operator norm with the power method . Parameters op : ` Operator ` Operator whose norm is to be estimated . If its ` Operator . range ` range does not coincide with its ` Operator . domain ` , an ` Operator . adjoint ` must be defined ( which implies that the operator must be linear ) . xstart : ` ` op . domain ` ` ` element - like ` , optional Starting point of the iteration . By default an ` Operator . domain ` element containing noise is used . maxiter : positive int , optional Number of iterations to perform . If the domain and range of ` ` op ` ` do not match , it needs to be an even number . If ` ` None ` ` is given , iterate until convergence . rtol : float , optional Relative tolerance parameter ( see Notes ) . atol : float , optional Absolute tolerance parameter ( see Notes ) . callback : callable , optional Function called with the current iterate in each iteration . Returns est _ opnorm : float The estimated operator norm of ` ` op ` ` . Examples Verify that the identity operator has norm close to 1: > > > space = odl . uniform _ discr ( 0 , 1 , 5) > > > id = odl . IdentityOperator ( space ) > > > estimation = power _ method _ opnorm ( id ) > > > round ( estimation , ndigits = 3) 1.0 Notes The operator norm : math : ` | | A | | ` is defined by as the smallest number such that . . math : : | | A ( x ) | | \ leq | | A | | | | x | | for all : math : ` x ` in the domain of : math : ` A ` . The operator is evaluated until ` ` maxiter ` ` operator calls or until the relative error is small enough . The error measure is given by ` ` abs ( a - b ) < = ( atol + rtol * abs ( b ) ) ` ` , where ` ` a ` ` and ` ` b ` ` are consecutive iterates ."""
if maxiter is None : maxiter = np . iinfo ( int ) . max maxiter , maxiter_in = int ( maxiter ) , maxiter if maxiter <= 0 : raise ValueError ( '`maxiter` must be positive, got {}' '' . format ( maxiter_in ) ) if op . domain == op . range : use_normal = False ncalls = maxiter else : # Do the power iteration for A * A ; the norm of A * A ( x _ N ) is then # an estimate of the square of the operator norm # We do only half the number of iterations compared to the usual # case to have the same number of operator evaluations . use_normal = True ncalls = maxiter // 2 if ncalls * 2 != maxiter : raise ValueError ( '``maxiter`` must be an even number for ' 'non-self-adjoint operator, got {}' '' . format ( maxiter_in ) ) # Make sure starting point is ok or select initial guess if xstart is None : x = noise_element ( op . domain ) else : # copy to ensure xstart is not modified x = op . domain . element ( xstart ) . copy ( ) # Take first iteration step to normalize input x_norm = x . norm ( ) if x_norm == 0 : raise ValueError ( '``xstart`` must be nonzero' ) x /= x_norm # utility to calculate opnorm from xnorm def calc_opnorm ( x_norm ) : if use_normal : return np . sqrt ( x_norm ) else : return x_norm # initial guess of opnorm opnorm = calc_opnorm ( x_norm ) # temporary to improve performance tmp = op . range . element ( ) # Use the power method to estimate opnorm for i in range ( ncalls ) : if use_normal : op ( x , out = tmp ) op . adjoint ( tmp , out = x ) else : op ( x , out = tmp ) x , tmp = tmp , x # Calculate x norm and verify it is valid x_norm = x . norm ( ) if x_norm == 0 : raise ValueError ( 'reached ``x=0`` after {} iterations' . format ( i ) ) if not np . isfinite ( x_norm ) : raise ValueError ( 'reached nonfinite ``x={}`` after {} iterations' '' . format ( x , i ) ) # Calculate opnorm opnorm , opnorm_old = calc_opnorm ( x_norm ) , opnorm # If the breaking condition holds , stop . Else rescale and go on . if np . isclose ( opnorm , opnorm_old , rtol , atol ) : break else : x /= x_norm if callback is not None : callback ( x ) return opnorm
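The same power iteration written against a plain NumPy matrix rather than an ODL Operator, as an illustration of the algorithm only; this is a sketch, not the odl API.
import numpy as np

def power_method_norm(A, maxiter=100, rtol=1e-5, atol=1e-8, seed=0):
    """Estimate the spectral norm ||A||_2 by power iteration on A^T A."""
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1])
    x /= np.linalg.norm(x)
    opnorm = 0.0
    for _ in range(maxiter):
        x = A.T @ (A @ x)  # one step with the normal operator, as in the non-self-adjoint branch
        x_norm = np.linalg.norm(x)
        opnorm, opnorm_old = np.sqrt(x_norm), opnorm  # ||A^T A x|| estimates ||A||^2
        if np.isclose(opnorm, opnorm_old, rtol=rtol, atol=atol):
            break
        x /= x_norm
    return opnorm

print(power_method_norm(np.diag([3.0, 1.0])))  # approximately 3.0, the largest singular value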
def handle_input ( self , input_hdr ) : """This method tries to ensure that the input data has the correct dimensions . INPUTS : input _ hdr ( no default ) Header from which data shape is to be extracted ."""
input_slice = input_hdr [ 'NAXIS' ] * [ 0 ] for i in range ( input_hdr [ 'NAXIS' ] ) : if input_hdr [ 'CTYPE%d' % ( i + 1 ) ] . startswith ( "RA" ) : input_slice [ - 1 ] = slice ( None ) if input_hdr [ 'CTYPE%d' % ( i + 1 ) ] . startswith ( "DEC" ) : input_slice [ - 2 ] = slice ( None ) return input_slice
def _remote_folder ( dirpath , remotes , syn ) : """Retrieve the remote folder for files , creating if necessary ."""
if dirpath in remotes : return remotes [ dirpath ] , remotes else : parent_dir , cur_dir = os . path . split ( dirpath ) parent_folder , remotes = _remote_folder ( parent_dir , remotes , syn ) s_cur_dir = syn . store ( synapseclient . Folder ( cur_dir , parent = parent_folder ) ) remotes [ dirpath ] = s_cur_dir . id return s_cur_dir . id , remotes
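The same memoized, create-parents-first recursion sketched against a plain dictionary of made-up folder ids, independent of the Synapse client; all names and ids here are hypothetical.
import itertools
import os

_fake_ids = itertools.count(1)

def ensure_folder(dirpath, remotes):
    # memoized recursion: make sure the parent exists, then register the child
    if dirpath in remotes:
        return remotes[dirpath], remotes
    parent_dir, cur_dir = os.path.split(dirpath)
    if parent_dir and parent_dir != dirpath:
        _, remotes = ensure_folder(parent_dir, remotes)
    folder_id = "syn%d" % next(_fake_ids)  # stands in for syn.store(Folder(...)).id
    remotes[dirpath] = folder_id
    return folder_id, remotes

print(ensure_folder("data/project/run1", {"data": "syn0"}))  # ('syn2', {...})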
def feed_appdata ( self , data , offset = 0 ) : """Feed plaintext data into the pipe . Return an ( ssldata , offset ) tuple . The ssldata element is a list of buffers containing record level data that needs to be sent to the remote SSL instance . The offset is the number of plaintext bytes that were processed , which may be less than the length of data . NOTE : In case of short writes , this call MUST be retried with the SAME buffer passed into the * data * argument ( i . e . the ` ` id ( ) ` ` must be the same ) . This is an OpenSSL requirement . A further particularity is that a short write will always have offset = = 0 , because the _ ssl module does not enable partial writes . And even though the offset is zero , there will still be encrypted data in ssldata ."""
if self . _state == self . S_UNWRAPPED : # pass through data in unwrapped mode return ( [ data [ offset : ] ] if offset < len ( data ) else [ ] , len ( data ) ) ssldata = [ ] view = memoryview ( data ) while True : self . _need_ssldata = False try : if offset < len ( view ) : offset += self . _sslobj . write ( view [ offset : ] ) except ssl . SSLError as e : # It is not allowed to call write ( ) after unwrap ( ) until the # close _ notify is acknowledged . We return the condition to the # caller as a short write . if sslcompat . get_reason ( e ) == 'PROTOCOL_IS_SHUTDOWN' : e . errno = ssl . SSL_ERROR_WANT_READ if e . errno not in ( ssl . SSL_ERROR_WANT_READ , ssl . SSL_ERROR_WANT_WRITE , ssl . SSL_ERROR_SYSCALL ) : raise self . _need_ssldata = e . errno == ssl . SSL_ERROR_WANT_READ # See if there ' s any record level data back for us . if self . _outgoing . pending : ssldata . append ( self . _outgoing . read ( ) ) if offset == len ( view ) or self . _need_ssldata : break return ( ssldata , offset )
def argsort ( self , axis = - 1 , kind = 'quicksort' , order = None ) : """Returns the indices that would sort an array . . . note : : This method wraps ` numpy . argsort ` . This documentation is modified from that of ` numpy . argsort ` . Perform an indirect sort along the given axis using the algorithm specified by the ` kind ` keyword . It returns an array of indices of the same shape as the original array that index data along the given axis in sorted order . * * Parameters * * * * axis * * : int or None , optional Axis along which to sort . The default is - 1 ( the last axis ) . If ` None ` , the flattened array is used . * * kind * * : { ' quicksort ' , ' mergesort ' , ' heapsort ' } , optional Sorting algorithm . * * order * * : list , optional This argument specifies which fields to compare first , second , etc . Not all fields need be specified . * * Returns * * * * index _ array * * : ndarray , int Array of indices that sort the tabarray along the specified axis . In other words , ` ` a [ index _ array ] ` ` yields a sorted ` a ` . * * See Also * * sort : Describes sorting algorithms used . lexsort : Indirect stable sort with multiple keys . ndarray . sort : Inplace sort . * * Notes * * See ` numpy . sort ` for notes on the different sorting algorithms . * * Examples * * Sorting with keys : > > > x = tabarray ( [ ( 1 , 0 ) , ( 0 , 1 ) ] , dtype = [ ( ' x ' , ' < i4 ' ) , ( ' y ' , ' < i4 ' ) ] ) tabarray ( [ ( 1 , 0 ) , ( 0 , 1 ) ] , dtype = [ ( ' x ' , ' < i4 ' ) , ( ' y ' , ' < i4 ' ) ] ) > > > x . argsort ( order = ( ' x ' , ' y ' ) ) array ( [ 1 , 0 ] ) > > > x . argsort ( order = ( ' y ' , ' x ' ) ) array ( [ 0 , 1 ] )"""
index_array = np . core . fromnumeric . _wrapit ( self , 'argsort' , axis , kind , order ) index_array = index_array . view ( np . ndarray ) return index_array
def delete_all ( self ) : '''Delete all books from the index'''
def delete_action_gen ( ) : scanner = scan ( self . es , index = self . index_name , query = { 'query' : { 'match_all' : { } } } ) for v in scanner : yield { '_op_type' : 'delete' , '_index' : self . index_name , '_type' : v [ '_type' ] , '_id' : v [ '_id' ] , } bulk ( self . es , delete_action_gen ( ) )
def find_id ( self , element_id ) : """Find a single element with the given ID . Parameters element _ id : str ID of the element to find Returns found element"""
element = _transform . FigureElement . find_id ( self , element_id ) return Element ( element . root )
def level_i18n_name ( self ) : """In use within templates for dynamic translations ."""
for level , name in spatial_granularities : if self . level == level : return name return self . level_name
def tauc_from_mass ( mass_g ) : """Estimate the convective turnover time from mass , using the method described in Cook + ( 2014ApJ . . . 785 . . . 10C ) . mass _ g - UCD mass in grams . Returns : the convective turnover timescale in seconds . Masses larger than 1.3 Msun are out of range and yield NaN . If the mass is < 0.1 Msun , the turnover time is fixed at 70 days . The Cook method was inspired by the description in McLean + (2012ApJ . . . 746 . . . 23M ) . It is a hybrid of the method described in Reiners & Basri ( 2010ApJ . . . 710 . . 924R ) and the data shown in Kiraga & Stepien (2007AcA . . . . 57 . . 149K ) . However , this version imposes the 70 - day cutoff in terms of mass , not spectral type , so that it is entirely defined in terms of a single quantity . There are discontinuities between the different break points ! Any future use should tweak the coefficients to make everything smooth ."""
m = mass_g / cgs . msun return np . piecewise ( m , [ m < 1.3 , m < 0.82 , m < 0.65 , m < 0.1 ] , [ lambda x : 61.7 - 44.7 * x , 25. , lambda x : 86.9 - 94.3 * x , 70. , np . nan ] ) * 86400.
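A quick numerical check of the piecewise fit above; the solar-mass value in grams is assumed here rather than taken from the module's cgs constants, and the function itself is assumed importable.
MSUN_G = 1.989e33  # grams, approximate; stands in for cgs.msun

tau_days = tauc_from_mass(0.3 * MSUN_G) / 86400.0
print(round(tau_days, 1))  # ~58.6 days, from the 86.9 - 94.3*m branch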
def action_display ( self ) : '''The action text , with any hyperlinked related entities .'''
action = self [ 'action' ] annotations = [ ] abbr = self . bill [ settings . LEVEL_FIELD ] if 'related_entities' in self : for entity in self [ 'related_entities' ] : name = entity [ 'name' ] _id = entity [ 'id' ] # If the importer couldn ' t ID the entity , # skip . if _id is None : continue url = mongoid_2_url ( abbr , _id ) link = '<a href="%s">%s</a>' % ( url , name ) if name in action : action = action . replace ( entity [ 'name' ] , link ) else : annotations . append ( link ) if annotations : action += ' (%s)' % ', ' . join ( annotations ) return action
def export_to_dicts ( table , * args , ** kwargs ) : """Export a ` rows . Table ` to a list of dicts"""
field_names = table . field_names return [ { key : getattr ( row , key ) for key in field_names } for row in table ]
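A minimal illustration with a namedtuple-backed stand-in for a rows.Table, which is assumed to expose field_names and attribute access the same way; the data is invented.
from collections import namedtuple

Row = namedtuple("Row", ["name", "age"])

class FakeTable:
    field_names = ["name", "age"]
    def __iter__(self):
        return iter([Row("ada", 36), Row("grace", 45)])

print(export_to_dicts(FakeTable()))
# [{'name': 'ada', 'age': 36}, {'name': 'grace', 'age': 45}]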
def _index_range ( self , version , symbol , date_range = None , ** kwargs ) : """Given a version , read the segment _ index and return the chunks associated with the date _ range . As the segment index is ( id - > last datetime ) we need to take care in choosing the correct chunks ."""
if date_range and 'segment_index' in version : # index is read - only but it ' s never written to index = np . frombuffer ( decompress ( version [ 'segment_index' ] ) , dtype = INDEX_DTYPE ) dtcol = self . _datetime64_index ( index ) if dtcol and len ( index ) : dts = index [ dtcol ] start , end = _start_end ( date_range , dts ) if start > dts [ - 1 ] : return - 1 , - 1 idxstart = min ( np . searchsorted ( dts , start ) , len ( dts ) - 1 ) idxend = min ( np . searchsorted ( dts , end , side = 'right' ) , len ( dts ) - 1 ) return int ( index [ 'index' ] [ idxstart ] ) , int ( index [ 'index' ] [ idxend ] + 1 ) return super ( PandasStore , self ) . _index_range ( version , symbol , ** kwargs )
def _determine_filtered_package_names ( self ) : """Return a list of package names to be filtered base on the configuration file ."""
# This plugin only processes packages ; if the line in the packages # configuration contains a PEP440 specifier it will be processed by the # blacklist release filter . So we need to remove any packages that # are not applicable for this plugin . filtered_packages = set ( ) try : lines = self . configuration [ "blacklist" ] [ "packages" ] package_lines = lines . split ( "\n" ) except KeyError : package_lines = [ ] for package_line in package_lines : package_line = package_line . strip ( ) if not package_line or package_line . startswith ( "#" ) : continue package_requirement = Requirement ( package_line ) if package_requirement . specifier : continue if package_requirement . name != package_line : logger . debug ( "Package line %r does not match requirement name %r" , package_line , package_requirement . name , ) continue filtered_packages . add ( package_line ) logger . debug ( "Project blacklist is %r" , list ( filtered_packages ) ) return list ( filtered_packages )
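What the filter keeps, shown directly with packaging's Requirement: bare project names pass, while anything carrying a PEP 440 specifier is left to the release filter. The configuration lines below are made up for illustration.
from packaging.requirements import Requirement

lines = ["requests", "numpy>=1.16", "# a comment", "", "flask"]
kept = []
for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
        continue
    req = Requirement(line)
    if not req.specifier and req.name == line:  # plain name, no version pin
        kept.append(line)
print(kept)  # ['requests', 'flask']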
def from_api ( cls , api ) : """create an application description for the todo app , that based on the api can use either tha api or the ux for interaction"""
ux = TodoUX ( api ) from . pseudorpc import PseudoRpc rpc = PseudoRpc ( api ) return cls ( { ViaAPI : api , ViaUX : ux , ViaRPC : rpc } )
def formatted ( self ) : """str : The IBAN formatted in blocks of 4 digits ."""
return ' ' . join ( self . compact [ i : i + 4 ] for i in range ( 0 , len ( self . compact ) , 4 ) )
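The 4-character grouping idiom on its own; the value below is a commonly cited sample IBAN, not one taken from this document.
compact = "GB82WEST12345698765432"
formatted = ' '.join(compact[i:i + 4] for i in range(0, len(compact), 4))
print(formatted)  # 'GB82 WEST 1234 5698 7654 32'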
def normalize_value ( text ) : """This removes newlines and multiple spaces from a string ."""
result = text . replace ( '\n' , ' ' ) result = re . subn ( '[ ]{2,}' , ' ' , result ) [ 0 ] return result
def get_name_locations ( self , name ) : """Return a list of ` ` ( resource , lineno ) ` ` tuples"""
result = [ ] for module in self . names : if name in self . names [ module ] : try : pymodule = self . project . get_module ( module ) if name in pymodule : pyname = pymodule [ name ] module , lineno = pyname . get_definition_location ( ) if module is not None : resource = module . get_module ( ) . get_resource ( ) if resource is not None and lineno is not None : result . append ( ( resource , lineno ) ) except exceptions . ModuleNotFoundError : pass return result
def K_gate_valve_Crane ( D1 , D2 , angle , fd = None ) : r'''Returns loss coefficient for a gate valve of types wedge disc , double disc , or plug type , as shown in [ 1 ] _ . If β = 1 and θ = 0: . . math : : K = K _ 1 = K _ 2 = 8f _ d If β < 1 and θ < = 45 ° : . . math : : K _ 2 = \ frac { K + \ sin \ frac { \ theta } { 2 } \ left [ 0.8(1 - \ beta ^ 2) + 2.6(1 - \ beta ^ 2 ) ^ 2 \ right ] } { \ beta ^ 4} If β < 1 and θ > 45 ° : . . math : : K _ 2 = \ frac { K + 0.5 \ sqrt { \ sin \ frac { \ theta } { 2 } } ( 1 - \ beta ^ 2) + ( 1 - \ beta ^ 2 ) ^ 2 } { \ beta ^ 4} Parameters D1 : float Diameter of the valve seat bore ( must be smaller or equal to ` D2 ` ) , [ m ] D2 : float Diameter of the pipe attached to the valve , [ m ] angle : float Angle formed by the reducer in the valve , [ degrees ] fd : float , optional Darcy friction factor calculated for the actual pipe flow in clean steel ( roughness = 0.0018 inch ) in the fully developed turbulent region ; do not specify this to use the original Crane friction factor ! Returns K : float Loss coefficient with respect to the pipe inside diameter [ - ] Notes This method is not valid in the laminar regime and the pressure drop will be underestimated in those conditions [ 2 ] _ . Examples Example 7-4 in [ 1 ] _ ; a 150 by 100 mm class 600 steel gate valve , conically tapered ports , length 550 mm , back of sear ring ~ 150 mm . The valve is connected to 146 mm schedule 80 pipe . The angle can be calculated to be 13 degrees . The valve is specified to be operating in turbulent conditions . > > > K _ gate _ valve _ Crane ( D1 = . 1 , D2 = . 146 , angle = 13.115) 1.1466029421844073 The calculated result is lower than their value of 1.22 ; the difference is due to Crane ' s generous intermediate rounding . A later , Imperial edition of Crane rounds differently - and comes up with K = 1.06. References . . [ 1 ] Crane Co . Flow of Fluids Through Valves , Fittings , and Pipe . Crane , 2009. . . [ 2 ] Harvey Wilson . " Pressure Drop in Pipe Fittings and Valves | Equivalent Length and Resistance Coefficient . " Katmar Software . Accessed July 28 , 2017 . http : / / www . katmarsoftware . com / articles / pipe - fitting - pressure - drop . htm .'''
angle = radians ( angle ) beta = D1 / D2 if fd is None : fd = ft_Crane ( D2 ) K1 = 8.0 * fd # This does not refer to upstream loss per se if beta == 1 or angle == 0 : return K1 # upstream and down else : beta2 = beta * beta one_m_beta2 = 1.0 - beta2 if angle <= 0.7853981633974483 : K = ( K1 + sin ( 0.5 * angle ) * ( 0.8 * one_m_beta2 + 2.6 * one_m_beta2 * one_m_beta2 ) ) / ( beta2 * beta2 ) else : K = ( K1 + 0.5 * ( sin ( 0.5 * angle ) ) ** 0.5 * one_m_beta2 + one_m_beta2 * one_m_beta2 ) / ( beta2 * beta2 ) return K
def update_from_dict ( self , keywords ) : """Update metadata value from a keywords dictionary . : param keywords : : return :"""
super ( ImpactLayerMetadata , self ) . update_from_dict ( keywords ) if 'if_provenance' in list ( keywords . keys ( ) ) : if_provenance = keywords [ 'if_provenance' ] for provenance_step in if_provenance : self . provenance . append_provenance_step ( provenance_step )
def call_method ( self , method , * args ) : """Call a JSON - RPC method and wait for its result . The * method * is called with positional arguments * args * . On success , the ` ` result ` ` field from the JSON - RPC response is returned . On error , a : class : ` JsonRpcError ` is raised , which you can use to access the ` ` error ` ` field of the JSON - RPC response ."""
message = self . _version . create_request ( method , args ) msgid = message [ 'id' ] try : with switch_back ( self . _timeout ) as switcher : self . _method_calls [ msgid ] = switcher self . send_message ( message ) args , _ = self . _hub . switch ( ) finally : self . _method_calls . pop ( msgid , None ) response = args [ 0 ] assert response [ 'id' ] == msgid error = response . get ( 'error' ) if error is not None : raise JsonRpcError ( 'error response calling "{}"' . format ( method ) , error ) return response [ 'result' ]
def GlobForPaths ( self , paths , pathtype = "OS" , root_path = None , process_non_regular_files = False , collect_ext_attrs = False ) : """Starts the Glob . This is the main entry point for this flow mixin . First we convert the pattern into regex components , and then we interpolate each component . Finally , we generate a cartesian product of all combinations . Args : paths : A list of GlobExpression instances . pathtype : The pathtype to use for creating pathspecs . root _ path : A pathspec where to start searching from . process _ non _ regular _ files : Work with all kinds of files - not only with regular ones . collect _ ext _ attrs : Whether to gather information about file extended attributes ."""
patterns = [ ] if not paths : # Nothing to do . return self . state . pathtype = pathtype self . state . root_path = root_path self . state . process_non_regular_files = process_non_regular_files self . state . collect_ext_attrs = collect_ext_attrs # Transform the patterns by substitution of client attributes . When the # client has multiple values for an attribute , this generates multiple # copies of the pattern , one for each variation . e . g . : # / home / % % Usernames % % / * - > [ / home / user1 / * , / home / user2 / * ] for path in paths : patterns . extend ( path . Interpolate ( knowledge_base = self . client_knowledge_base ) ) # Sort the patterns so that if there are files whose paths conflict with # directory paths , the files get handled after the conflicting directories # have been added to the component tree . patterns . sort ( key = len , reverse = True ) # Expand each glob pattern into a list of components . A component is either # a wildcard or a literal component . # e . g . / usr / lib / * . exe - > [ ' / usr / lib ' , ' . * . exe ' ] # We build a tree for each component such that duplicated components are # merged . We do not need to reissue the same client requests for the same # components . For example , the patterns : # ' / home / % % Usernames % % * ' - > { ' / home / ' : { # ' syslog . * \ \ Z ( ? ms ) ' : { } , ' test . * \ \ Z ( ? ms ) ' : { } } } # Note : The component tree contains serialized pathspecs in dicts . for pattern in patterns : # The root node . curr_node = self . state . component_tree components = self . ConvertGlobIntoPathComponents ( pattern ) for i , curr_component in enumerate ( components ) : is_last_component = i == len ( components ) - 1 next_node = curr_node . get ( curr_component . SerializeToString ( ) , { } ) if is_last_component and next_node : # There is a conflicting directory already existing in the tree . # Replace the directory node with a node representing this file . curr_node [ curr_component . SerializeToString ( ) ] = { } else : curr_node = curr_node . setdefault ( curr_component . SerializeToString ( ) , { } ) root_path = next ( iterkeys ( self . state . component_tree ) ) self . CallStateInline ( messages = [ None ] , next_state = "ProcessEntry" , request_data = dict ( component_path = [ root_path ] ) )
def main ( ) : """Return 0 on success ."""
args = parse_args ( ) if not args . files : return 0 with enable_sphinx_if_possible ( ) : status = 0 pool = multiprocessing . Pool ( multiprocessing . cpu_count ( ) ) try : if len ( args . files ) > 1 : results = pool . map ( _check_file , [ ( name , args ) for name in args . files ] ) else : # This is for the case where we read from standard in . results = [ _check_file ( ( args . files [ 0 ] , args ) ) ] for ( filename , errors ) in results : for error in errors : line_number = error [ 0 ] message = error [ 1 ] if not re . match ( r'\([A-Z]+/[0-9]+\)' , message ) : message = '(ERROR/3) ' + message output_message ( '{}:{}: {}' . format ( filename , line_number , message ) ) status = 1 except ( IOError , UnicodeError ) as exception : output_message ( exception ) status = 1 return status
def get_symbol_returns_from_yahoo ( symbol , start = None , end = None ) : """Wrapper for pandas . io . data . get _ data _ yahoo ( ) . Retrieves prices for symbol from yahoo and computes returns based on adjusted closing prices . Parameters symbol : str Symbol name to load , e . g . ' SPY ' start : pandas . Timestamp compatible , optional Start date of time period to retrieve end : pandas . Timestamp compatible , optional End date of time period to retrieve Returns pandas . DataFrame Returns of symbol in requested period ."""
try : px = web . get_data_yahoo ( symbol , start = start , end = end ) px [ 'date' ] = pd . to_datetime ( px [ 'date' ] ) px . set_index ( 'date' , drop = False , inplace = True ) rets = px [ [ 'adjclose' ] ] . pct_change ( ) . dropna ( ) except Exception as e : warnings . warn ( 'Yahoo Finance read failed: {}, falling back to Google' . format ( e ) , UserWarning ) px = web . get_data_google ( symbol , start = start , end = end ) rets = px [ [ 'Close' ] ] . pct_change ( ) . dropna ( ) rets . index = rets . index . tz_localize ( "UTC" ) rets . columns = [ symbol ] return rets
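The core computation, daily returns from an adjusted-close series via pct_change, shown on a tiny hand-made frame so it runs without the pandas-datareader call above; the dates and prices are invented.
import pandas as pd

px = pd.DataFrame(
    {"adjclose": [100.0, 101.0, 99.99]},
    index=pd.to_datetime(["2017-01-03", "2017-01-04", "2017-01-05"]),
)
rets = px[["adjclose"]].pct_change().dropna()
rets.columns = ["SPY"]
print(rets)  # 0.01 on 2017-01-04 and roughly -0.01 on 2017-01-05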