def collect(*args, **kwargs):
    '''collect(m1, m2, ...) yields a persistent map whose keys are the union of all keys in
    the given maps m1, m2, etc. and whose values are tuples containing each of the given maps
    (in provided order) that contain the given key. This function never evaluates the values
    in the maps, so it implicitly supports laziness.

    The collect function first passes its arguments to flatten_maps, so it is fine to pass
    lists or nested lists of maps to this function; all will be collected.'''
    args = flatten_maps(args, **kwargs)
    if len(args) == 0:
        return ps.m()
    m = {}
    for arg in args:
        for k in six.iterkeys(arg):
            if k in m:
                m[k].append(arg)
            else:
                m[k] = [arg]
    return ps.pmap({k: tuple(v) for (k, v) in six.iteritems(m)})
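# Usage sketch for collect (assumes pyrsistent is imported as ps, six is
# available, and flatten_maps accepts plain dicts -- all assumptions):
a = {'x': 1, 'y': 2}
b = {'y': 3, 'z': 4}
merged = collect(a, b)
# merged['y'] -> (a, b): both maps contain the key 'y'
# merged['x'] -> (a,):   only the first map contains 'x'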
def redactor(blacklist: Dict[Pattern, str]) -> Callable[[str], str]:
    """Returns a function which transforms a str, replacing all matches of
    each blacklisted pattern with its replacement."""
    def processor_wrapper(msg: str) -> str:
        for regex, repl in blacklist.items():
            if repl is None:
                repl = '<redacted>'
            msg = regex.sub(repl, msg)
        return msg

    return processor_wrapper
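# Usage sketch for redactor (the patterns below are illustrative):
import re

redact = redactor({re.compile(r'\b\d{16}\b'): None,
                   re.compile(r'password=\S+'): 'password=***'})
print(redact('card 4111111111111111 password=hunter2'))
# -> 'card <redacted> password=***'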
def close(self):
    """Close this session, and any open channels that are tied to it."""
    if not self.active:
        return
    self.stop_thread()
    for chan in list(self._channels.values()):
        chan._unlink()
    self.sock.close()
def auto_repr(obj: Any, with_addr: bool = False, sort_attrs: bool = True,
              joiner: str = COMMA_SPACE) -> str:
    """Convenience function for :func:`__repr__`.
    Works its way through the object's ``__dict__`` and reports accordingly.

    Args:
        obj: object to display
        with_addr: include the memory address of ``obj``
        sort_attrs: sort the attributes into alphabetical order?
        joiner: string with which to join the elements

    Returns:
        string: :func:`repr`-style representation
    """
    if sort_attrs:
        keys = sorted(obj.__dict__.keys())
    else:
        keys = obj.__dict__.keys()
    elements = ["{}={}".format(k, repr(getattr(obj, k))) for k in keys]
    return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
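# Usage sketch: wiring auto_repr into a class's __repr__. The class is
# illustrative, and the exact output framing is decided by repr_result
# (assumed to live in the same module as auto_repr):
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return auto_repr(self)

# repr(Point(1, 2)) would render something like '<Point(x=1, y=2)>'.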
def findViewWithAttributeOrRaise(self, attr, val, root="ROOT"):
    '''Finds the View or raise a ViewNotFoundException.

    @return: the View found
    @raise ViewNotFoundException: raise the exception if View not found'''
    view = self.findViewWithAttribute(attr, val, root)
    if view:
        return view
    else:
        raise ViewNotFoundException(attr, val, root)
def sort(polylines):
    """sort points within polyline p0-p1-p2..."""
    for n, c in enumerate(polylines):
        l = len(c)
        if l > 2:
            # DEFINE FIRST AND LAST INDEX AS THOSE TWO POINTS THAT
            # HAVE THE BIGGEST DIFFERENCE FROM THE MIDDLE:
            mid = c.mean(axis=0)
            distV = (c - mid)
            dists = norm(distV, axis=-1)
            firstI = np.argmax(dists)
            sign = np.sign(distV[firstI])
            dd = np.logical_or(np.sign(distV[:, 0]) != sign[0],
                               np.sign(distV[:, 1]) != sign[1])
            dists[~dd] = 0
            lastI = np.argmax(dists)
            ind = _sort(c, firstI, lastI)
            c = c[ind]
            polylines[n] = c
def list_files(start_path):
    """tree unix command replacement."""
    s = u'\n'
    for root, dirs, files in os.walk(start_path):
        level = root.replace(start_path, '').count(os.sep)
        indent = ' ' * 4 * level
        s += u'{}{}/\n'.format(indent, os.path.basename(root))
        sub_indent = ' ' * 4 * (level + 1)
        for f in files:
            s += u'{}{}\n'.format(sub_indent, f)
    return s
def align_matrices(m1, m2, dfun='sqrdiff', verbose=False, H=1e6, Texp=1,
                   T0=1e-3, Hbrk=10):
    '''This function aligns two matrices relative to one another by reordering
    the nodes in M2. The function uses a version of simulated annealing.

    Parameters
    ----------
    M1 : NxN np.ndarray
        first connection matrix
    M2 : NxN np.ndarray
        second connection matrix
    dfun : str
        distance metric to use for matching
            'absdiff' : absolute difference
            'sqrdiff' : squared difference (default)
            'cosang' : cosine of vector angle
    verbose : bool
        print out cost at each iteration. Default False.
    H : int
        annealing parameter, default value 1e6
    Texp : int
        annealing parameter, default value 1. Coefficient of H s.t.
        Texp0 = 1 - Texp/H
    T0 : float
        annealing parameter, default value 1e-3
    Hbrk : int
        annealing parameter, default value = 10. Coefficient of H s.t.
        Hbrk0 = H/Hbrk

    Returns
    -------
    Mreordered : NxN np.ndarray
        reordered connection matrix M2
    Mindices : Nx1 np.ndarray
        reordered indices
    cost : float
        objective function distance between M1 and Mreordered

    Notes
    -----
    Connection matrices can be weighted or binary, directed or undirected.
    They must have the same number of nodes. M1 can be entered in any node
    ordering.

    Note that in general, the outcome will depend on the initial condition
    (the setting of the random number seed). Also, there is no good way to
    determine optimal annealing parameters in advance - these parameters will
    need to be adjusted "by hand" (particularly H, Texp, T0, and Hbrk). For
    large and/or dense matrices, it is highly recommended to perform
    exploratory runs varying the settings of 'H' and 'Texp' and then select
    the best values.

    Based on extensive testing, it appears that T0 and Hbrk can remain
    unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
    example. H is the most important parameter - set to larger values as the
    problem size increases. Good solutions can be obtained for matrices up to
    about 100 nodes. It is advisable to run this function multiple times and
    select the solution(s) with the lowest 'cost'.

    If the two matrices are related it may be very helpful to pre-align them
    by reordering along their largest eigenvectors:

        [v,~] = eig(M1); v1 = abs(v(:,end)); [a1,b1] = sort(v1);
        [v,~] = eig(M2); v2 = abs(v(:,end)); [a2,b2] = sort(v2);
        [a,b,c] = overlapMAT2(M1(b1,b1), M2(b2,b2), 'dfun', 1);

    Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
    instead.'''
    n = len(m1)
    if n < 2:
        raise BCTParamError("align_matrix will infinite loop on a singleton "
                            "or null matrix.")

    # define maxcost (greatest possible difference) and lowcost
    if dfun in ('absdiff', 'absdff'):
        maxcost = np.sum(np.abs(np.sort(m1.flat) - np.sort(m2.flat)[::-1]))
        lowcost = np.sum(np.abs(m1 - m2)) / maxcost
    elif dfun in ('sqrdiff', 'sqrdff'):
        maxcost = np.sum((np.sort(m1.flat) - np.sort(m2.flat)[::-1])**2)
        lowcost = np.sum((m1 - m2)**2) / maxcost
    elif dfun == 'cosang':
        maxcost = np.pi / 2
        lowcost = np.arccos(np.dot(m1.flat, m2.flat) /
                            np.sqrt(np.dot(m1.flat, m1.flat) *
                                    np.dot(m2.flat, m2.flat))) / maxcost
    else:
        raise BCTParamError('dfun must be absdiff or sqrdiff or cosang')

    mincost = lowcost
    anew = np.arange(n)
    amin = np.arange(n)
    h = 0
    hcnt = 0

    # adjust annealing parameters from user provided coefficients
    # H determines the maximal number of steps (user-provided)
    # Texp determines the steepness of the temperature gradient
    Texp = 1 - Texp / H
    # T0 sets the initial temperature and scales the energy term (user provided)
    # Hbrk sets a break point for the simulation
    Hbrk = H / Hbrk

    while h < H:
        h += 1
        hcnt += 1
        # terminate if no new mincost has been found for some time
        if hcnt > Hbrk:
            break
        # current temperature
        T = T0 * (Texp**h)

        # choose two positions at random and flip them
        atmp = anew.copy()
        r1, r2 = rng.randint(n, size=(2,))
        while r1 == r2:
            r2 = rng.randint(n)
        atmp[r1] = anew[r2]
        atmp[r2] = anew[r1]
        m2atmp = m2[np.ix_(atmp, atmp)]
        if dfun in ('absdiff', 'absdff'):
            costnew = np.sum(np.abs(m1 - m2atmp)) / maxcost
        elif dfun in ('sqrdiff', 'sqrdff'):
            costnew = np.sum((m1 - m2atmp)**2) / maxcost
        elif dfun == 'cosang':
            costnew = np.arccos(np.dot(m1.flat, m2atmp.flat) /
                                np.sqrt(np.dot(m1.flat, m1.flat) *
                                        np.dot(m2.flat, m2.flat))) / maxcost

        if (costnew < lowcost or
                rng.random_sample() < np.exp(-(costnew - lowcost) / T)):
            anew = atmp
            lowcost = costnew
            # is this the absolute best?
            if lowcost < mincost:
                amin = anew
                mincost = lowcost
                if verbose:
                    print('step %i ... current lowest cost = %f' % (h, mincost))
                hcnt = 0
            # if the cost is 0 we're done
            if mincost == 0:
                break
    if verbose:
        print('step %i ... final lowest cost = %f' % (h, mincost))

    M_reordered = m2[np.ix_(amin, amin)]
    M_indices = amin
    cost = mincost
    return M_reordered, M_indices, cost
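# Usage sketch for align_matrices, assuming the module-level rng and helper
# names it relies on are in scope (the random graphs here are illustrative):
import numpy as np

rs = np.random.RandomState(0)
g1 = (rs.rand(20, 20) > 0.8).astype(float)
perm = rs.permutation(20)
g2 = g1[np.ix_(perm, perm)]  # g2 is g1 with its nodes shuffled

reordered, indices, cost = align_matrices(g1, g2, dfun='sqrdiff', H=1e5)
# A cost near 0 means the annealer recovered an ordering that aligns g2
# back onto g1; rerunning with different seeds may give better solutions.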
def flood(im, regions=None, mode='max'):
    r"""Floods/fills each region in an image with a single value based on the
    specific values in that region. The ``mode`` argument is used to determine
    how the value is calculated.

    Parameters
    ----------
    im : array_like
        An ND image with isolated regions, containing 0's elsewhere.
    regions : array_like
        An array the same shape as ``im`` with each region labeled. If None
        is supplied (default) then ``scipy.ndimage.label`` is used with its
        default arguments.
    mode : string
        Specifies how to determine which value should be used to flood each
        region. Options are:

        'max' - Floods each region with the local maximum in that region

        'min' - Floods each region with the local minimum in that region

        'size' - Floods each region with the size of that region

    Returns
    -------
    image : ND-array
        A copy of ``im`` with new values placed in each foreground voxel
        based on the ``mode``.

    See Also
    --------
    props_to_image
    """
    mask = im > 0
    if regions is None:
        labels, N = spim.label(mask)
    else:
        labels = sp.copy(regions)
        N = labels.max()
    I = im.flatten()
    L = labels.flatten()
    if mode.startswith('max'):
        V = sp.zeros(shape=N + 1, dtype=float)
        for i in range(len(L)):
            if V[L[i]] < I[i]:
                V[L[i]] = I[i]
    elif mode.startswith('min'):
        V = sp.ones(shape=N + 1, dtype=float) * sp.inf
        for i in range(len(L)):
            if V[L[i]] > I[i]:
                V[L[i]] = I[i]
    elif mode.startswith('size'):
        V = sp.zeros(shape=N + 1, dtype=int)
        for i in range(len(L)):
            V[L[i]] += 1
    im_flooded = sp.reshape(V[labels], newshape=im.shape)
    im_flooded = im_flooded * mask
    return im_flooded
def get_argv_for_command(self):
    """Returns stripped arguments that would be passed into the command."""
    argv = [a for a in self.argv]
    argv.insert(0, self.prog_name)
    return argv
def flush_keys(self, signed_prekey, prekeys, reboot_connection=False):
    """Sends prekeys.

    :return:
    :rtype:
    """
    preKeysDict = {}
    for prekey in prekeys:
        keyPair = prekey.getKeyPair()
        preKeysDict[self.adjustId(prekey.getId())] = self.adjustArray(
            keyPair.getPublicKey().serialize()[1:])

    signedKeyTuple = (self.adjustId(signed_prekey.getId()),
                      self.adjustArray(
                          signed_prekey.getKeyPair().getPublicKey().serialize()[1:]),
                      self.adjustArray(signed_prekey.getSignature()))

    setKeysIq = SetKeysIqProtocolEntity(
        self.adjustArray(self.manager.identity.getPublicKey().serialize()[1:]),
        signedKeyTuple, preKeysDict, Curve.DJB_TYPE,
        self.adjustId(self.manager.registration_id))

    onResult = lambda _, __: self.on_keys_flushed(
        prekeys, reboot_connection=reboot_connection)
    self._sendIq(setKeysIq, onResult, self.onSentKeysError)
def collect_single_s3(self, path):
    """Collect single S3 artifact

    :param path string: S3 path"""
    # The S3 folder contains the tokens needed to perform
    # matching of project, gitref, etc.
    folder = os.path.dirname(path)
    rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
    if rinfo is None or len(rinfo) == 0:
        # print('Incorrect folder/file name format for %s' % folder)
        return None
    info = dict(rinfo)

    # Ignore AppVeyor Debug builds
    if info.get('bldtype', '').lower() == 'debug':
        print('Ignoring debug artifact %s' % folder)
        return None

    tag = info.get('tag', None)
    if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
        # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
        # with an empty value when not set, it leaves that token
        # in the string - so translate that to no tag.
        tag = None
    sha = info.get('sha', None)

    # Match tag or sha to gitref
    if (tag is not None and tag == self.gitref) or \
            (sha is not None and sha.startswith(self.gitref)):
        return Artifact(self, path, info)
    return None
def check(self, action, page=None, lang=None, method=None):
    """Return ``True`` if the current user has permission on the page."""
    if self.user.is_superuser:
        return True
    if action == 'change':
        return self.has_change_permission(page, lang, method)
    if action == 'delete':
        return bool(self.delete_page())
    if action == 'add':
        return bool(self.add_page())
    if action == 'freeze':
        return self.user.has_perm('pages.can_freeze')
    if action == 'publish':
        return self.user.has_perm('pages.can_publish')
    return False
def get_lm_f0tau(mass, spin, l, m, nmodes):
    """Return the f0 and the tau of each overtone for a given l, m mode.

    Parameters
    ----------
    mass : float or array
        Mass of the black hole (in solar masses).
    spin : float or array
        Dimensionless spin of the final black hole.
    l : int or array
        l-index of the harmonic.
    m : int or array
        m-index of the harmonic.
    nmodes : int
        The number of overtones to generate.

    Returns
    -------
    f0 : float or array
        The frequency of the QNM(s), in Hz. If only a single mode is
        requested (and mass, spin, l, and m are not arrays), this will be a
        float. If multiple modes requested, will be an array with shape
        ``[input shape x] nmodes``, where ``input shape`` is the broadcasted
        shape of the inputs.
    tau : float or array
        The damping time of the QNM(s), in seconds. Return type is same
        as f0.
    """
    # convert to arrays
    mass, spin, l, m, input_is_array = ensurearray(mass, spin, l, m)
    # we'll ravel the arrays so we can evaluate each parameter combination
    # one at a time
    origshape = mass.shape
    if nmodes < 1:
        raise ValueError("nmodes must be >= 1")
    if nmodes > 1:
        newshape = tuple(list(origshape) + [nmodes])
    else:
        newshape = origshape
    f0s = numpy.zeros((mass.size, nmodes))
    taus = numpy.zeros((mass.size, nmodes))
    mass = mass.ravel()
    spin = spin.ravel()
    l = l.ravel()
    m = m.ravel()
    qnmfreq = None
    modes = range(nmodes)
    for ii in range(mass.size):
        qnmfreq = _genqnmfreq(mass[ii], spin[ii], l[ii], m[ii], nmodes,
                              qnmfreq=qnmfreq)
        f0s[ii, :] = [qnmfreq.data[n].real / (2 * numpy.pi) for n in modes]
        taus[ii, :] = [1. / qnmfreq.data[n].imag for n in modes]
    f0s = f0s.reshape(newshape)
    taus = taus.reshape(newshape)
    return (formatreturn(f0s, input_is_array),
            formatreturn(taus, input_is_array))
def pid(col, ignore_gaps=False):
    """Compute the percent identity of an alignment column.

    Define PID as the frequency of the most frequent nucleotide in the
    column.

    :param col: an alignment column; a dictionary where keys are seq. names
                and values are the nucleotide in the column for that
                sequence.
    :param ignore_gaps: if True, do not count gaps towards the total number
                        of sequences in the column (i.e. the denominator of
                        the fraction).
    :raise ValueError: if the column contains only gaps.
    """
    hist = {}
    total = 0
    found_non_gap = False
    for v in col.values():
        if v == sequence.GAP_CHAR:
            if ignore_gaps:
                continue
            else:
                total += 1
        else:
            found_non_gap = True
            if v not in hist:
                hist[v] = 0
            hist[v] += 1
            total += 1
    if not found_non_gap:
        raise ValueError("Cannot determine PID of column with only gaps")
    return max(hist.values()) / float(total)
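# Worked example for pid, assuming sequence.GAP_CHAR is '-' (an assumption;
# the column below is illustrative):
col = {'seq1': 'A', 'seq2': 'A', 'seq3': 'C', 'seq4': '-'}
# Counting the gap: 2 A's out of 4 entries  -> pid(col) == 0.5
# Ignoring the gap: 2 A's out of 3 entries  -> pid(col, ignore_gaps=True) == 2/3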
def from_dict(cls, d):
    """Reconstructs the VoronoiContainer object from a dict representation of
    the VoronoiContainer created using the as_dict method.

    :param d: dict representation of the VoronoiContainer object
    :return: VoronoiContainer object"""
    structure = Structure.from_dict(d['structure'])
    voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure)
    maximum_distance_factor = (d['maximum_distance_factor']
                               if 'maximum_distance_factor' in d else None)
    minimum_angle_factor = (d['minimum_angle_factor']
                            if 'minimum_angle_factor' in d else None)
    return cls(structure=structure, voronoi_list2=voronoi_list2,
               # neighbors_lists=neighbors_lists,
               normalized_angle_tolerance=d['normalized_angle_tolerance'],
               normalized_distance_tolerance=d['normalized_distance_tolerance'],
               additional_conditions=d['additional_conditions'],
               valences=d['valences'],
               maximum_distance_factor=maximum_distance_factor,
               minimum_angle_factor=minimum_angle_factor)
def slice(self, items):
    '''Slice the sequence of all items to obtain them for current page.'''
    if self.limit:
        if self.page > self.pages_count:
            return []
        if self.page == self.pages_count:
            return items[self.limit * (self.page - 1):]
        return items[self.limit * (self.page - 1):self.limit * self.page]
    else:
        return items[:]
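# Worked example of the slicing arithmetic above, using a minimal stand-in
# owner object (the names here are illustrative, not part of the original):
from types import SimpleNamespace

pager = SimpleNamespace(limit=10, page=3, pages_count=5)
items = list(range(47))
# slice(pager, items) -> items[20:30], i.e. [20, 21, ..., 29]
# with pager.page = 5 -> items[40:],  the short final page [40, ..., 46]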
def can_render(self, partial_mimetype):
    """Given a partial mimetype (such as 'json' or 'html'), return if the
    view can render that type."""
    for mime in self.render_map.keys():
        if mime == '*/*':
            return True
        if partial_mimetype in mime:
            return True
    return False
def apps_upload_create(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/apps#upload-app-package"
    api_path = "/api/v2/apps/uploads.json"
    return self.call(api_path, method="POST", data=data, **kwargs)
def get_mor_by_name(si, obj_type, obj_name):
    '''Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None
def main():
    """Function for PDA to CNF Operation

    :type argv: list
    :param argv: Parameters"""
    if len(argv) < 3:
        print 'Usage for getting CFG: %s CFG_fileA CFG ' % argv[0]
        print 'Usage for getting STR: %s CFG_fileA STR ' \
              'Optimize[0 or 1] splitstring[0 or 1] ' % argv[0]
        print ''
        print 'For example: python pdacnf.py grammar.y STR 1 0'
        print '             python pdacnf.py grammar.y STR 1 1'
        print '             python pdacnf.py grammar.y CFG'
        return
    alphabet = createalphabet()
    mode = argv[2]
    optimized = 0
    splitstring = 0
    if mode == 'STR':
        optimized = int(argv[3])
        splitstring = int(argv[4])
    cfgtopda = CfgPDA(alphabet)
    print '* Parsing Grammar:',
    mma = cfgtopda.yyparse(argv[1])
    print 'OK'
    print ' - Total PDA states are ' + repr(len(mma.s))
    print '* Simplify State IDs:',
    simple_a = SimplifyStateIDs()
    mma.s, biggestid, newaccepted = simple_a.get(mma.s)
    if newaccepted:
        print 'OK'
    else:
        print 'OK'
    print '* Eliminate READ states:',
    replace = ReadReplace(mma.s, biggestid)
    mma.s = replace.replace_read()
    print 'OK'
    print ' - Total PDA states now are ' + repr(len(mma.s))
    maxstate = replace.nextstate() - 1
    print '* Reduce PDA:',
    simple_b = ReducePDA()
    mma.s = simple_b.get(mma.s)
    print 'OK'
    print ' - Total PDA states now are ' + repr(len(mma.s))
    print '* PDA to CFG transformation:',
    cnfgenerator = PdaCnf(mma.s)
    grammar = cnfgenerator.get_rules(optimized)
    print 'OK'
    print ' - Total CFG rules generated: ' + repr(len(grammar))
    if mode == 'STR':
        gen = CFGGenerator(CNFGenerator(grammar),
                           optimized=optimized,
                           splitstring=splitstring,
                           maxstate=maxstate)
        print gen.generate()
    else:
        print grammar
def validate_registration(self):
    '''Checks if the device + username have already been registered with
    the bridge.'''
    url = '/api/%s' % self.username
    response = self.make_request('GET', url)
    if 'error' in response:
        return False
    return True
def kill_window(pymux, variables):
    "Kill all panes in the current window."
    for pane in pymux.arrangement.get_active_window().panes:
        pymux.kill_pane(pane)
def pop(self):
    """Pop a reading off of this virtual stream and return it."""
    if self.reading is None:
        raise StreamEmptyError("Pop called on virtual stream walker "
                               "without any data", selector=self.selector)
    reading = self.reading
    # If we're not a constant stream, we just exhausted ourselves
    if self.selector.match_type != DataStream.ConstantType:
        self.reading = None
    return reading
def checkState(self, checkState):
    """Sets the data given a Qt.CheckState (Qt.Checked or Qt.Unchecked)."""
    if checkState == Qt.Checked:
        logger.debug("BoolCti.checkState setting to True")
        self.data = True
    elif checkState == Qt.Unchecked:
        logger.debug("BoolCti.checkState setting to False")
        self.data = False
    else:
        raise ValueError("Unexpected check state: {!r}".format(checkState))
def unregister_file(self, file):
    """Unregisters given file in the **file_system_events_manager**.

    :param file: File.
    :type file: unicode
    :return: Method success.
    :rtype: bool"""
    if self.__engine.file_system_events_manager.is_path_registered(file):
        self.__engine.file_system_events_manager.unregister_path(file)
    return True
def cleanup(first_I, first_Z):
    """cleans up unbalanced steps; failure can be from an unbalanced final
    step, or from missing steps. This takes care of missing steps.
    """
    cont = 0
    Nmin = len(first_I)
    if len(first_Z) < Nmin:
        Nmin = len(first_Z)
    for kk in range(Nmin):
        if first_I[kk][0] != first_Z[kk][0]:
            print("\n WARNING: ")
            if first_I[kk] < first_Z[kk]:
                del first_I[kk]
            else:
                del first_Z[kk]
            print("Unmatched step number: ", kk + 1, ' ignored')
            cont = 1
        if cont == 1:
            return first_I, first_Z, cont
    return first_I, first_Z, cont
def lapjv(i, j, costs, wants_dual_variables=False, augmenting_row_reductions=2):
    '''Sparse linear assignment solution using Jonker-Volgenant algorithm

    i, j - similarly-sized vectors that pair the object at index i[n] with
           the object at index j[n]

    costs - a vector of similar size to i and j that is the cost of pairing
            i[n] with j[n].

    wants_dual_variables - the dual problem reduces the costs using two
            vectors, u[i] and v[j] where the solution is the maximum value of
            np.sum(u) + np.sum(v) where cost[i,j] - u[i] - v[j] >= 0.
            Set wants_dual_variables to True to have u and v returned in
            addition to the assignments.

    augmenting_row_reductions - the authors suggest that augmenting row
            reduction be performed twice to optimize the u and v before the
            augmenting stage. The caller can choose a different number of
            reductions by supplying a different value here.

    All costs not appearing in i, j are taken as infinite. Each i in the
    range, 0 to max(i), must appear at least once and similarly for j.

    returns (x, y), the pairs of assignments that represent the solution or
    (x, y, u, v) if the dual variables are requested.
    '''
    i = np.atleast_1d(i).astype(int)
    j = np.atleast_1d(j).astype(int)
    costs = np.atleast_1d(costs)

    assert len(i) == len(j), "i and j must be the same length"
    assert len(i) == len(costs), "costs must be the same length as i"

    # Find the number of i with non-infinite cost for each j
    j_count = np.bincount(j)
    assert not np.any(j_count == 0), "all j must be paired with at least one i"

    # if you order anything by j, this is an index to the minimum for each j
    j_index = np.hstack([[0], np.cumsum(j_count[:-1])])

    # Likewise for i
    i_count = np.bincount(i)
    assert not np.any(i_count == 0), "all i must be paired with at least one j"
    i_index = np.hstack([[0], np.cumsum(i_count[:-1])])

    n = len(j_count)  # dimension of the square cost matrix
    assert n == len(i_count), "There must be the same number of unique i and j"

    # Variable initialization:
    # The output variables:
    # x - for each i, the assigned j. -1 indicates uninitialized
    # y - for each j, the assigned i
    # u, v - the dual variables
    # A value of x = n or y = n means "unassigned"
    x = np.ascontiguousarray(np.ones(n, np.uint32) * n)
    y = np.ascontiguousarray(np.ones(n, np.uint32) * n, np.uint32)
    u = np.ascontiguousarray(np.zeros(n, np.float64))

    # Column reduction
    # For a given j, find the i with the minimum cost.
    order = np.lexsort((-i, costs, j))
    min_idx = order[j_index]
    min_i = i[min_idx]

    # v[j] is assigned to the minimum cost over all i
    v = np.ascontiguousarray(costs[min_idx], np.float64)

    # Find the last j for which i was min_i.
    x[min_i] = np.arange(n).astype(np.uint32)
    y[x[x != n]] = np.arange(n).astype(np.uint32)[x != n]

    # Three cases for i:
    # i is not the minimum of any j - i goes on free list
    # i is the minimum of one j - v[j] remains the same and y[x[j]] = i
    # i is the minimum of more than one j, perform reduction transfer
    assignment_count = np.bincount(min_i[min_i != n])
    assignment_count = np.hstack(
        (assignment_count, np.zeros(n - len(assignment_count), int)))
    free_i = assignment_count == 0
    one_i = assignment_count == 1
    # order = np.lexsort((costs, i)) Replace with this after all is done
    order = np.lexsort((j, i))
    j = np.ascontiguousarray(j[order], np.uint32)
    costs = np.ascontiguousarray(costs[order], np.float64)
    i_index = np.ascontiguousarray(i_index, np.uint32)
    i_count = np.ascontiguousarray(i_count, np.uint32)
    if np.any(one_i):
        reduction_transfer(
            np.ascontiguousarray(np.argwhere(one_i).flatten(), np.uint32),
            j, i_index, i_count, x, u, v, costs)

    # Perform augmenting row reduction on unassigned i
    ii = np.ascontiguousarray(np.argwhere(free_i).flatten(), np.uint32)
    if len(ii) > 0:
        for iii in range(augmenting_row_reductions):
            ii = augmenting_row_reduction(
                n, ii, j, i_index, i_count, x, y, u, v, costs)
    augment(n, ii, j, i_index, i_count, x, y, u, v, costs)
    if wants_dual_variables:
        return x, y, u, v
    else:
        return x, y
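# Usage sketch for lapjv on a tiny dense 3x3 problem expressed sparsely
# (every (i, j) pair is listed, so no cost is implicitly infinite; assumes
# the helpers reduction_transfer / augmenting_row_reduction / augment from
# the same module are in scope):
import numpy as np

cost = np.array([[4.0, 1.0, 3.0],
                 [2.0, 0.0, 5.0],
                 [3.0, 2.0, 2.0]])
ii, jj = np.meshgrid(np.arange(3), np.arange(3), indexing='ij')
x, y = lapjv(ii.ravel(), jj.ravel(), cost.ravel())
# x[i] is the column assigned to row i; the optimal assignment here is
# x == [1, 0, 2] with total cost 1 + 2 + 2 = 5.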
def find_replace(self, node):
    """Try to find a replacement node for the current node.

    Parameters
    ----------
    node : docutil node
        Node to find replacement for.

    Returns
    -------
    nodes : node or list of node
        The replacement nodes of current node.
        Returns None if no replacement can be found.
    """
    newnode = None
    if isinstance(node, nodes.Sequential):
        newnode = self.auto_toc_tree(node)
    elif isinstance(node, nodes.literal_block):
        newnode = self.auto_code_block(node)
    elif isinstance(node, nodes.literal):
        newnode = self.auto_inline_code(node)
    return newnode
def accept(self, f, *args):
    """Like 'match', but consume the token (tokenizer advances)."""
    match = self.match(f, *args)
    if match is None:
        return None
    self.tokenizer.skip(len(match.tokens))
    return match
def get_item_bank_id_metadata(self):
    """get the metadata for item bank"""
    metadata = dict(self._item_bank_id_metadata)
    metadata.update(
        {'existing_id_values': self.my_osid_object_form._my_map['itemBankId']})
    return Metadata(**metadata)
def describe_formatted(self, name, database=None):
    """Retrieve results of DESCRIBE FORMATTED command. See Impala
    documentation for more.

    Parameters
    ----------
    name : string
        Table name. Can be fully qualified (with database)
    database : string, optional
    """
    from ibis.impala.metadata import parse_metadata

    stmt = self._table_command('DESCRIBE FORMATTED', name, database=database)
    query = ImpalaQuery(self, stmt)
    result = query.execute()

    # Leave formatting to pandas
    for c in result.columns:
        result[c] = result[c].str.strip()

    return parse_metadata(result)
def do_blame(self, subcmd, opts, *args):
    """Output the content of specified files or URLs with revision and
    author information in-line.

    usage:
        blame TARGET...

    ${cmd_option_list}
    """
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
def assoc(self, index, value):
    '''Return a new vector with value associated at index. The implicit
    parameter is not modified.'''
    newvec = ImmutableVector()
    newvec.tree = self.tree.assoc(index, value)
    if index >= self._length:
        newvec._length = index + 1
    else:
        newvec._length = self._length
    return newvec
def mirror(self, axes='x'):
    """Generates a symmetry of the Surface with respect to the global axes.

    :param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
    :type axes: str
    :returns: ``pyny.Surface``
    """
    return Space(Place(self)).mirror(axes, inplace=False)[0].surface
def create_issue_comment(self, body):
    """:calls: `POST /repos/:owner/:repo/issues/:number/comments
    <http://developer.github.com/v3/issues/comments>`_

    :param body: string
    :rtype: :class:`github.IssueComment.IssueComment`
    """
    assert isinstance(body, (str, unicode)), body
    post_parameters = {
        "body": body,
    }
    headers, data = self._requester.requestJsonAndCheck(
        "POST",
        self.issue_url + "/comments",
        input=post_parameters
    )
    return github.IssueComment.IssueComment(
        self._requester, headers, data, completed=True)
def get_grid(self):
    """Standardize the layout of the table into grids"""
    mentions, lines = _split_text_n_lines(self.elems)
    # Sort mentions in reading order where y values are snapped to half
    # height-sized grid
    mentions.sort(key=lambda m: (m.yc_grid, m.xc))
    grid = Grid(mentions, lines, self)
    return grid
def build_backend(tasks, default_host=('127.0.0.1', DEFAULT_PORT), *args, **kw):
    """Most of these args are passed directly to BackEnd(). However,
    default_host is used a bit differently. It should be a (host, port)
    pair, and may be overridden with cmdline args:

        file.py localhost
        file.py localhost 54545
    """
    host, port = default_host
    if len(sys.argv) > 1:
        host = sys.argv[1]
    if len(sys.argv) > 2:
        port = sys.argv[2]
    return BackEnd(tasks, host, port, *args, **kw)
def _splitit(self, line, isheader):
    """Split each element of line to fit the column width

    Each element is turned into a list, result of the wrapping of the
    string to the desired width
    """
    line_wrapped = []
    for cell, width in zip(line, self._width):
        array = []
        for c in cell.split('\n'):
            if c.strip() == "":
                array.append("")
            else:
                array.extend(textwrapper(c, width))
        line_wrapped.append(array)
    max_cell_lines = reduce(max, list(map(len, line_wrapped)))
    for cell, valign in zip(line_wrapped, self._valign):
        if isheader:
            valign = "t"
        if valign == "m":
            missing = max_cell_lines - len(cell)
            cell[:0] = [""] * int(missing / 2)
            cell.extend([""] * int(missing / 2 + missing % 2))
        elif valign == "b":
            cell[:0] = [""] * (max_cell_lines - len(cell))
        else:
            cell.extend([""] * (max_cell_lines - len(cell)))
    return line_wrapped
def _parse_var_scalar(self, X: scalar_type) -> dict:
    """Unpack the numpy array and bind each column to one of the variables
    in self.var_names

    Returns
    -------
    inferred dict of variable: val pairs
    """
    arg_vars = {}
    # there is only one var name, so this will run once and no sorting is
    # required
    for var_name in self.var_names:
        arg_vars[var_name] = X
    return arg_vars
def transform_index_to_physical_point(image, index):
    """Get spatial point from index of an image.

    ANTsR function: `antsTransformIndexToPhysicalPoint`

    Arguments
    ---------
    img : ANTsImage
        image to get values from
    index : list or tuple or numpy.ndarray
        location in image

    Returns
    -------
    tuple

    Example
    -------
    >>> import ants
    >>> import numpy as np
    >>> img = ants.make_image((10,10), np.random.randn(100))
    >>> pt = ants.transform_index_to_physical_point(img, (2,2))
    """
    if not isinstance(image, iio.ANTsImage):
        raise ValueError('image must be ANTsImage type')

    if isinstance(index, np.ndarray):
        index = index.tolist()
    if not isinstance(index, (tuple, list)):
        raise ValueError('index must be tuple or list')

    if len(index) != image.dimension:
        raise ValueError('len(index) != image.dimension')

    index = [i + 1 for i in index]
    ndim = image.dimension
    ptype = image.pixeltype
    libfn = utils.get_lib_fn('TransformIndexToPhysicalPoint%s%i'
                             % (utils.short_ptype(ptype), ndim))
    point = libfn(image.pointer, [list(index)])
    return np.array(point[0])
def wfind(self, tag_name, params=None, fn=None, case_sensitive=False):
    """This method works the same as :meth:`find`, but only in one level of
    the :attr:`childs`.

    This allows to chain :meth:`wfind` calls::

        >>> dom = dhtmlparser.parseString('''
        ... <root>
        ...     <some>
        ...         <something>
        ...             <xe id="wanted xe" />
        ...         </something>
        ...         <something>
        ...             asd
        ...         </something>
        ...         <xe id="another xe" />
        ...     </some>
        ...     <some>
        ...         else
        ...         <xe id="yet another xe" />
        ...     </some>
        ... </root>''')
        >>> xe = dom.wfind("root").wfind("some").wfind("something").find("xe")
        >>> xe
        [<dhtmlparser.htmlelement.HTMLElement object at 0x8a979ac>]
        >>> str(xe[0])
        '<xe id="wanted xe" />'

    Args:
        tag_name (str): Name of the tag you are looking for. Set to "" if
            you wish to use only `fn` parameter.
        params (dict, default None): Parameters which have to be present in
            tag to be considered matching.
        fn (function, default None): Use this function to match tags.
            Function expects one parameter which is HTMLElement instance.
        case_sensitive (bool, default False): Use case sensitive search.

    Returns:
        obj: Blank HTMLElement with all matches in :attr:`childs` property.

    Note:
        Returned element also have set :attr:`_container` property to True.
    """
    childs = self.childs
    if self._container:  # container object
        childs = map(
            lambda x: x.childs,
            filter(lambda x: x.childs, self.childs)
        )
        childs = sum(childs, [])  # flatten the list

    el = self.__class__()  # HTMLElement()
    el._container = True
    for child in childs:
        if child.isEndTag():
            continue
        if child.isAlmostEqual(tag_name, params, fn, case_sensitive):
            el.childs.append(child)
    return el
def chmod(target):
    """Recursively set the chmod for files to 0600 and 0700 for folders.
    It's ok unless we need something more specific.

    Args:
        target (str): Root file or folder
    """
    assert isinstance(target, str)
    assert os.path.exists(target)
    file_mode = stat.S_IRUSR | stat.S_IWUSR
    folder_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR

    # Remove the immutable attribute recursively if there is one
    remove_immutable_attribute(target)

    if os.path.isfile(target):
        os.chmod(target, file_mode)
    elif os.path.isdir(target):
        # chmod the root item
        os.chmod(target, folder_mode)
        # chmod recursively in the folder if it's one
        for root, dirs, files in os.walk(target):
            for cur_dir in dirs:
                os.chmod(os.path.join(root, cur_dir), folder_mode)
            for cur_file in files:
                os.chmod(os.path.join(root, cur_file), file_mode)
    else:
        raise ValueError("Unsupported file type: {}".format(target))
async def on_raw_notice(self, message):
    """Modify NOTICE to redirect CTCP messages."""
    nick, metadata = self._parse_user(message.source)
    target, msg = message.params

    if is_ctcp(msg):
        self._sync_user(nick, metadata)
        type, response = parse_ctcp(msg)

        # Find dedicated handler if it exists.
        attr = 'on_ctcp_' + pydle.protocol.identifierify(type) + '_reply'
        if hasattr(self, attr):
            # `nick` is the parsed source user; the handlers receive it.
            await getattr(self, attr)(nick, target, response)
        # Invoke global handler.
        await self.on_ctcp_reply(nick, target, type, response)
    else:
        await super().on_raw_notice(message)
def delete(self):
    """Destructor."""
    if self.minisat:
        pysolvers.minisat22_del(self.minisat)
        self.minisat = None
def Update(self, env, args=None):
    """Update an environment with the option variables.

    env - the environment to update.
    """
    values = {}

    # first set the defaults:
    for option in self.options:
        if option.default is not None:
            values[option.key] = option.default

    # next set the value specified in the options file
    for filename in self.files:
        if os.path.exists(filename):
            dir = os.path.split(os.path.abspath(filename))[0]
            if dir:
                sys.path.insert(0, dir)
            try:
                values['__name__'] = filename
                with open(filename, 'r') as f:
                    contents = f.read()
                exec(contents, {}, values)
            finally:
                if dir:
                    del sys.path[0]
                del values['__name__']

    # set the values specified on the command line
    if args is None:
        args = self.args

    for arg, value in args.items():
        added = False
        for option in self.options:
            if arg in list(option.aliases) + [option.key]:
                values[option.key] = value
                added = True
        if not added:
            self.unknown[arg] = value

    # put the variables in the environment:
    # (don't copy over variables that are not declared as options)
    for option in self.options:
        try:
            env[option.key] = values[option.key]
        except KeyError:
            pass

    # Call the convert functions:
    for option in self.options:
        if option.converter and option.key in values:
            value = env.subst('${%s}' % option.key)
            try:
                try:
                    env[option.key] = option.converter(value)
                except TypeError:
                    env[option.key] = option.converter(value, env)
            except ValueError as x:
                raise SCons.Errors.UserError(
                    'Error converting option: %s\n%s' % (option.key, x))

    # Finally validate the values:
    for option in self.options:
        if option.validator and option.key in values:
            option.validator(option.key, env.subst('${%s}' % option.key), env)
def get_output(self, module_name):
    """Return the output of the named module. This will be a list."""
    output = []
    module_info = self._get_module_info(module_name)
    if module_info:
        output = module_info["module"].get_latest()
    # we do a deep copy so that any user does not change the actual output
    # of the module.
    return deepcopy(output)
def solve(self, max_worlds=10000, silent=False):
    """find the best world to make people happy"""
    self.num_worlds = 0
    num_unhappy = 0
    for tax_rate in range(self.tax_range[0], self.tax_range[1]):
        for equity in range(self.equity_range[0], self.equity_range[1]):
            for tradition in range(self.tradition_range[0],
                                   self.tradition_range[1]):
                self.num_worlds += 1
                if self.num_worlds > max_worlds:
                    break
                w = World(str(self.num_worlds).zfill(6),
                          [5000, tax_rate / 10, tradition / 10, equity / 10])
                world_happiness = 0
                num_unhappy = 0
                for person in self.all_people:
                    wh = Happiness(person, w)
                    world_happiness += wh.rating
                    if wh.rating < 0:
                        num_unhappy += 1
                if world_happiness > self.net_happiness:
                    self.net_happiness = world_happiness
                    self.unhappy_people = num_unhappy
                    if not silent:
                        print('found better world - ' + w.nme + ' = '
                              + str(world_happiness)
                              + ' - total unhappy_people = '
                              + str(self.unhappy_people))
def _create_h5_file(in_paths, new_path):
    """Function to create a new .h5 file that contains the copy of the
    contents of the input file(s).

    in_paths : str or list
        If the input is a string, it is assumed that the two signals are in
        the same file, else, if the input is a list, it is assumed that the
        two signals are in different files (the list should contain the
        paths to the two files).
    new_path : str
        The path to create the new file. (default: 'sync_file.h5')

    Returns
    -------
    new_file : h5py Object
        Object of the h5py package containing the new file with the copy of
        the contents of the input file(s).
    """
    if type(in_paths) == str:
        in_paths = [in_paths]

    new_file = File(new_path, 'w')
    for i, in_path in enumerate(in_paths):
        with File(in_path, 'r') as file:
            for key in list(file.keys()):
                file.copy(source=file[key], dest=new_file, name=key)
    return new_file
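# Usage sketch for _create_h5_file (h5py is the File class used above; the
# file names and dataset keys are illustrative):
from h5py import File

# Build two tiny input files so the merge has something to copy.
for name, key in [('a.h5', 'signal_a'), ('b.h5', 'signal_b')]:
    with File(name, 'w') as f:
        f[key] = [1, 2, 3]

merged = _create_h5_file(['a.h5', 'b.h5'], 'sync_file.h5')
print(list(merged.keys()))  # -> ['signal_a', 'signal_b']
merged.close()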
def remove_authentication(self, auth_name=None, organization=None):
    """Remove the current authentication or the one given by `auth_name`"""
    if auth_name:
        if organization:
            url = '%s/authentications/org/%s/name/%s' % (
                self.domain, organization, auth_name)
        else:
            url = '%s/authentications/name/%s' % (self.domain, auth_name)
    else:
        url = '%s/authentications' % (self.domain,)

    res = self.session.delete(url)
    self._check_response(res, [201])
def change_svc_notification_timeperiod(self, service, notification_timeperiod):
    """Change service notification timeperiod

    Format of the line that triggers function call::

        CHANGE_SVC_NOTIFICATION_TIMEPERIOD;<host_name>;<service_description>;<notification_timeperiod>

    :param service: service to edit
    :type service: alignak.objects.service.Service
    :param notification_timeperiod: timeperiod to set
    :type notification_timeperiod: alignak.objects.timeperiod.Timeperiod
    :return: None
    """
    service.modified_attributes |= \
        DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value
    service.notification_period = notification_timeperiod
    self.send_an_element(service.get_update_status_brok())
def getServiceModuleName(self):
    '''return module name.'''
    name = GetModuleBaseNameFromWSDL(self.wsdl)
    if not name:
        raise WsdlGeneratorError('could not determine a service name')
    if self.server_module_suffix is None:
        return name
    return '%s%s' % (name, self.server_module_suffix)
def _bfd_tx(self, **kwargs):
    """Return the BFD minimum transmit interval XML.

    You should not use this method. You probably want `BGP.bfd`.

    Args:
        min_tx (str): BFD transmit interval in milliseconds (300, 500, etc)
        delete (bool): Remove the configuration if ``True``.

    Returns:
        XML to be passed to the switch.

    Raises:
        None
    """
    int_type = kwargs['int_type']
    method_name = 'interface_%s_bfd_interval_min_tx' % int_type
    bfd_tx = getattr(self._interface, method_name)
    config = bfd_tx(**kwargs)
    if kwargs['delete']:
        tag = 'min-tx'
        config.find('.//*%s' % tag).set('operation', 'delete')
    return config
def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info):
    """Given an inventory csv file, return an iterator over keys
    """
    # To avoid thundering herd downloads, we do an immediate yield for
    # interspersed i/o
    yield None

    # Inline these values to avoid the local var lookup, they are constants
    # rKey = schema['Key'] # 1
    # rIsLatest = schema['IsLatest'] # 3
    # rVersionId = schema['VersionId'] # 2

    with tempfile.NamedTemporaryFile() as fh:
        client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
        fh.seek(0)
        reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
        for key_set in chunks(reader, 1000):
            keys = []
            for kr in key_set:
                k = kr[1]
                if inventory_filter(ifilters, schema, kr):
                    continue
                k = unquote_plus(k)
                if versioned:
                    if kr[3] == 'true':
                        keys.append((k, kr[2], True))
                    else:
                        keys.append((k, kr[2]))
                else:
                    keys.append(k)
            yield keys
def linez(self, lines):
    """Creates a POLYLINEZ shape. Lines is a collection of lines, each made
    up of a list of xyzm values. If the z (elevation) value is not included,
    it defaults to 0. If the m (measure) value is not included, it defaults
    to None (NoData)."""
    shapeType = POLYLINEZ
    self._shapeparts(parts=lines, shapeType=shapeType)
def main(unused_argv):
    """Run an agent."""
    stopwatch.sw.enabled = FLAGS.profile or FLAGS.trace
    stopwatch.sw.trace = FLAGS.trace

    map_inst = maps.get(FLAGS.map)

    agent_classes = []
    players = []

    agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
    agent_cls = getattr(importlib.import_module(agent_module), agent_name)
    agent_classes.append(agent_cls)
    players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent_race],
                                 FLAGS.agent_name or agent_name))

    if map_inst.players >= 2:
        if FLAGS.agent2 == "Bot":
            players.append(sc2_env.Bot(sc2_env.Race[FLAGS.agent2_race],
                                       sc2_env.Difficulty[FLAGS.difficulty]))
        else:
            agent_module, agent_name = FLAGS.agent2.rsplit(".", 1)
            agent_cls = getattr(importlib.import_module(agent_module),
                                agent_name)
            agent_classes.append(agent_cls)
            players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent2_race],
                                         FLAGS.agent2_name or agent_name))

    threads = []
    for _ in range(FLAGS.parallel - 1):
        t = threading.Thread(target=run_thread,
                             args=(agent_classes, players, FLAGS.map, False))
        threads.append(t)
        t.start()

    run_thread(agent_classes, players, FLAGS.map, FLAGS.render)

    for t in threads:
        t.join()

    if FLAGS.profile:
        print(stopwatch.sw)
def save(self, *args, **kwargs):
    """Saves model and set initial state."""
    super(ModelDiffMixin, self).save(*args, **kwargs)
    self.__initial = self._dict
def transform(self, X):
    """Select continuous features and transform them using PCA.

    Parameters
    ----------
    X : numpy ndarray, {n_samples, n_components}
        New data, where n_samples is the number of samples and n_components
        is the number of components.

    Returns
    -------
    array-like, {n_samples, n_components}
    """
    selected = auto_select_categorical_features(X, threshold=self.threshold)
    _, X_sel, n_selected, _ = _X_selected(X, selected)

    if n_selected == 0:
        # No features selected.
        raise ValueError('No continuous feature was found!')
    else:
        pca = PCA(svd_solver=self.svd_solver,
                  iterated_power=self.iterated_power,
                  random_state=self.random_state)
        return pca.fit_transform(X_sel)
def _ttm_me_compute(self, V, edims, sdims, transp):
    """Assume Y = T x_i V_i for i = 1...n can fit into memory"""
    shapeY = np.copy(self.shape)

    # Determine size of Y
    for n in np.union1d(edims, sdims):
        shapeY[n] = V[n].shape[1] if transp else V[n].shape[0]

    # Allocate Y (final result) and v (vectors for elementwise computations)
    Y = zeros(shapeY)
    shapeY = array(shapeY)
    v = [None for _ in range(len(edims))]

    for i in range(np.prod(shapeY[edims])):
        rsubs = unravel_index(shapeY[edims], i)
def global_cmdline_values():
    """Returns a dictionary of global command line arguments (computed with
    :py:func:`global_cmdline_args`) to their current values. The returned
    dictionary is cached. Example:

    .. code-block:: python

        global_cmdline_values()
        # -> {"core_local_scheduler": True}
    """
    global _global_cmdline_values
    if _global_cmdline_values:
        return _global_cmdline_values

    luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
    if not luigi_parser:
        return None

    # go through all actions of the full luigi parser and compare option
    # strings with the global cmdline args
    parser = full_parser()
    global_args = global_cmdline_args()
    _global_cmdline_values = {}
    for action in parser._actions:
        if any(arg in action.option_strings for arg in global_args):
            _global_cmdline_values[action.dest] = getattr(
                luigi_parser.known_args, action.dest)

    return _global_cmdline_values
def validate_param_completion(self, param, leftover_args):
    """validates that a param should be completed"""
    # validates param starts with unfinished word
    completes = self.validate_completion(param)

    # show parameter completions when started
    full_param = (self.unfinished_word.startswith("--")
                  and param.startswith("--"))
    char_param = (self.unfinished_word.startswith("-")
                  and not param.startswith("--"))

    # show full parameters before any are used
    new_param = (not self.unfinished_word and not leftover_args
                 and param.startswith("--"))

    # checks for parameters already in the line as well as aliases
    no_doubles = True
    command_doubles = self.command_param_info.get(self.current_command, {})
    for alias in command_doubles.get(param, []):
        if alias in leftover_args:
            no_doubles = False

    return completes and no_doubles and any((full_param, char_param, new_param))
def part_studio_stl(self, did, wid, eid):
    '''Exports an STL from a part studio

    Args:
        - did (str): Document ID
        - wid (str): Workspace ID
        - eid (str): Element ID

    Returns:
        - requests.Response: Onshape response data
    '''
    req_headers = {
        'Accept': 'application/vnd.onshape.v1+octet-stream'
    }
    return self._api.request(
        'get',
        '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl',
        headers=req_headers)
def RGB_to_HSL(cobj, *args, **kwargs):
    """Converts from RGB to HSL.

    H values are in degrees and are 0 to 360.
    S values are a percentage, 0.0 to 1.0.
    L values are a percentage, 0.0 to 1.0.
    """
    var_R = cobj.rgb_r
    var_G = cobj.rgb_g
    var_B = cobj.rgb_b

    var_max = max(var_R, var_G, var_B)
    var_min = min(var_R, var_G, var_B)

    var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)
    var_L = 0.5 * (var_max + var_min)

    if var_max == var_min:
        var_S = 0
    elif var_L <= 0.5:
        var_S = (var_max - var_min) / (2.0 * var_L)
    else:
        var_S = (var_max - var_min) / (2.0 - (2.0 * var_L))

    return HSLColor(var_H, var_S, var_L)
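# Worked example of the saturation branches above for a muted red,
# RGB = (0.8, 0.2, 0.2): max = 0.8, min = 0.2, so L = 0.5*(0.8+0.2) = 0.5.
# Since L <= 0.5, S = (0.8-0.2) / (2*0.5) = 0.6, and the hue helper resolves
# H to 0 degrees (the red axis), giving HSL(0, 0.6, 0.5).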
def get_build_tool_version(self):
    """Gets the build tool version to be used by zipalign from build.gradle
    file.

    Returns:
        A string containing the build tool version, default is 23.0.2.
    """
    with open('%s/%s/build.gradle' % (self.path, self.src_folder)) as f:
        for line in f.readlines():
            if 'buildToolsVersion' in line:
                matches = re.findall(r'buildToolsVersion \"(.+?)\"', line)
                if len(matches) == 1:
                    return matches[0]
    return config.build_tool_version
def fix_paths(self, paths):
    """Fix the filenames in the iterable paths

    Returns:
        old2new: Mapping old_path --> new_path
    """
    old2new, fixed_exts = {}, []

    for path in list_strings(paths):
        newpath, ext = self._fix_path(path)

        if newpath is not None:
            # if ext not in fixed_exts:
            #     if ext == "1WF": continue
            #     raise ValueError("Unknown extension %s" % ext)
            # print(ext, path, fixed_exts)
            # if ext != '1WF':
            #     assert ext not in fixed_exts
            if ext not in fixed_exts:
                if ext == "1WF":
                    continue
                raise ValueError("Unknown extension %s" % ext)
            fixed_exts.append(ext)
            old2new[path] = newpath

    return old2new
def pivot(self, column_):
    """Pivot the data by code; adds support for pivoting multiple columns."""
    if isinstance(column_, str):
        try:
            return self.data.reset_index().pivot(
                index='datetime', columns='code', values=column_)
        except:
            return self.data.reset_index().pivot(
                index='date', columns='code', values=column_)
    elif isinstance(column_, list):
        try:
            return self.data.reset_index().pivot_table(
                index='datetime', columns='code', values=column_)
        except:
            return self.data.reset_index().pivot_table(
                index='date', columns='code', values=column_)
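# Usage sketch for pivot with a toy pandas frame (the wrapper object is
# illustrative; only its .data attribute matters for the call):
import pandas as pd
from types import SimpleNamespace

df = pd.DataFrame({
    'datetime': pd.to_datetime(['2024-01-01', '2024-01-01',
                                '2024-01-02', '2024-01-02']),
    'code': ['A', 'B', 'A', 'B'],
    'close': [10.0, 20.0, 11.0, 21.0],
}).set_index(['datetime', 'code'])

wrapper = SimpleNamespace(data=df)
wide = pivot(wrapper, 'close')  # one column per code, datetime as the index
print(wide)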
def find_path(self, start, end, grid):
    """find a path from start to end node on grid by iterating over all
    neighbors of a node (see check_neighbors)

    :param start: start node
    :param end: end node
    :param grid: grid that stores all possible steps/tiles as 2D-list
    :return:
    """
    self.start_time = time.time()  # execution time limitation
    self.runs = 0  # count number of iterations
    start.opened = True

    open_list = [start]

    while len(open_list) > 0:
        self.runs += 1
        self.keep_running()

        path = self.check_neighbors(start, end, grid, open_list)
        if path:
            return path, self.runs

    # failed to find path
    return [], self.runs
def drop_dims(self, drop_dims):
    """Drop dimensions and associated variables from this dataset.

    Parameters
    ----------
    drop_dims : str or list
        Dimension or dimensions to drop.

    Returns
    -------
    obj : Dataset
        The dataset without the given dimensions (or any variables
        containing those dimensions)
    """
    if utils.is_scalar(drop_dims):
        drop_dims = [drop_dims]

    missing_dimensions = [d for d in drop_dims if d not in self.dims]
    if missing_dimensions:
        raise ValueError('Dataset does not contain the dimensions: %s'
                         % missing_dimensions)

    drop_vars = set(k for k, v in self._variables.items()
                    for d in v.dims if d in drop_dims)
    variables = OrderedDict((k, v) for k, v in self._variables.items()
                            if k not in drop_vars)
    coord_names = set(k for k in self._coord_names if k in variables)
    return self._replace_with_new_dims(variables, coord_names)
def find_output_at_time(self, ifo, time):
    '''Return File that covers the given time.

    Parameters
    ----------
    ifo : string
        Name of the ifo (or ifos) that the File should correspond to
    time : int/float/LIGOGPStime
        Return the File(s) that cover the supplied time. If no File covers
        the time this will return None.

    Returns
    -------
    list of File classes
        The File(s) that correspond to the time.
    '''
    # Get list of Files that overlap time, for given ifo
    outFiles = [i for i in self
                if ifo in i.ifo_list and time in i.segment_list]
    if len(outFiles) == 0:
        # No OutFile at this time
        return None
    elif len(outFiles) == 1:
        # 1 OutFile at this time (good!)
        return outFiles
    else:
        # Multiple output files. Currently this is valid, but we may want
        # to demand exclusivity later, or in certain cases. Hence the
        # separation.
        return outFiles
def portrait_image(model, request):
    """XXX: needs polishing. Return configured default portrait if not set
    on user.
    """
    response = Response()
    cfg = ugm_general(model)
    response.body = model.attrs[cfg.attrs['users_portrait_attr']]
    response.headers['Content-Type'] = 'image/jpeg'
    response.headers['Cache-Control'] = 'max-age=0'
    return response
def get_policy_version(policy_name, version_id,
                       region=None, key=None, keyid=None, profile=None):
    '''Get the specified version of an IAM managed policy.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.get_policy_version mypolicy v1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        ret = conn.get_policy_version(
            _get_policy_arn(policy_name, region=region, key=key,
                            keyid=keyid, profile=profile),
            version_id)
        retval = ret.get('get_policy_version_response', {}).get(
            'get_policy_version_result', {}).get('policy_version', {})
        retval['document'] = _unquote(retval.get('document'))
        return {'policy_version': retval}
    except boto.exception.BotoServerError:
        return None
def extend_shapes(df_shapes, axis, distance):
    '''Extend shape/polygon outline away from polygon center point by
    absolute distance.'''
    df_shapes_i = df_shapes.copy()
    offsets = df_shapes_i[axis + '_center_offset'].copy()
    offsets[offsets < 0] -= distance
    offsets[offsets >= 0] += distance
    df_shapes_i[axis] = df_shapes_i[axis + '_center'] + offsets
    return df_shapes_i
def start_subscribe(self):
    """Create a new Subscription context manager."""
    if not self.conn:
        raise ValueError('Not connected')
    elif not self.pubsub_conn:
        raise ValueError('PubSub not enabled')
    # creates a new context manager
    return Subscription(self)
def request(self, _cache_key, _cache_ignore, _cache_timeout, **kwargs):
    """This is a wrapper function that handles the caching of the request.

    See DefaultHandler.with_cache for reference.
    """
    if _cache_key:
        # Pop the request's session cookies from the cache key.
        # These appear to be unreliable and change with every
        # request. Also, with the introduction of OAuth I don't think
        # that cookies are being used to store anything that
        # differentiates API requests anyways
        url, items = _cache_key
        _cache_key = (url, (items[0], items[1], items[3], items[4]))

    if kwargs['request'].method != 'GET':
        # I added this check for RTV, I have no idea why PRAW would ever
        # want to cache POST/PUT/DELETE requests
        _cache_ignore = True

    if _cache_ignore:
        return self._request(**kwargs)

    self._clear_timeouts(_cache_timeout)
    if _cache_key in self.cache:
        return self.cache[_cache_key]

    result = self._request(**kwargs)

    # The handlers don't call `raise_for_status` so we need to ignore
    # status codes that will result in an exception that should not be
    # cached.
    if result.status_code not in (200, 302):
        return result

    self.timeouts[_cache_key] = timer()
    self.cache[_cache_key] = result
    return result
def magicquil ( f ) : """Decorator to enable a more convenient syntax for writing quil programs . With this decorator there is no need to keep track of a Program object and regular Python if / else branches can be used for classical control flow . Example usage : @ magicquil def fast _ reset ( q1 ) : reg1 = MEASURE ( q1 , None ) if reg1: X ( q1) else : I ( q1) my _ program = fast _ reset ( 0 ) # this will be a Program object"""
rewritten_function = _rewrite_function ( f ) @ functools . wraps ( f ) def wrapper ( * args , ** kwargs ) : if _program_context . get ( None ) is not None : rewritten_function ( * args , ** kwargs ) program = _program_context . get ( ) else : token = _program_context . set ( Program ( ) ) rewritten_function ( * args , ** kwargs ) program = _program_context . get ( ) _program_context . reset ( token ) return program return wrapper
def log_text ( self , text , ** kw ) : """Add a text entry to be logged during : meth : ` commit ` . : type text : str : param text : the text entry : type kw : dict : param kw : ( optional ) additional keyword arguments for the entry . See : class : ` ~ google . cloud . logging . entries . LogEntry ` ."""
self . entries . append ( TextEntry ( payload = text , ** kw ) )
def init_app ( self , app = None ) : """Initialize application configuration"""
config = getattr ( app , 'config' , app ) self . team_id = config . get ( 'TEAM_ID' )
def insert ( self , index , value ) : """Insert an instance of User into the collection ."""
self . check ( value ) self . _user_list . insert ( index , value )
def _mark_candidate_indexes ( lines , candidate ) : """Mark candidate indexes with markers Markers : * c - line that could be a signature line * l - long line * d - line that starts with dashes but has other chars as well > > > _ mark _ candidate _ indexes ( [ ' Some text ' , ' ' , ' - ' , ' Bob ' ] , [ 0 , 2 , 3 ] ) ' cdc '"""
# at first consider everything to be potential signature lines markers = list ( 'c' * len ( candidate ) ) # mark lines starting from bottom up for i , line_idx in reversed ( list ( enumerate ( candidate ) ) ) : if len ( lines [ line_idx ] . strip ( ) ) > TOO_LONG_SIGNATURE_LINE : markers [ i ] = 'l' else : line = lines [ line_idx ] . strip ( ) if line . startswith ( '-' ) and line . strip ( "-" ) : markers [ i ] = 'd' return "" . join ( markers )
def _get_env ( self , config ) : """Read environment variables based on the settings defined in the defaults . These are expected to be upper - case versions of the actual setting names , prefixed by ` ` SCRAPEKIT _ ` ` ."""
for option , value in config . items ( ) : env_name = 'SCRAPEKIT_%s' % option . upper ( ) value = os . environ . get ( env_name , value ) config [ option ] = value return config
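For example, the loop above means an environment variable overrides a default of the same name; a self-contained sketch (the setting names here are hypothetical):

import os

config = {'threads': '4', 'cache_path': '/tmp/scrape'}
os.environ['SCRAPEKIT_THREADS'] = '16'

for option, value in config.items():
    config[option] = os.environ.get('SCRAPEKIT_%s' % option.upper(), value)

# config -> {'threads': '16', 'cache_path': '/tmp/scrape'}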
def close ( self ) : """Close the pickle file , and the zip archive file . The single zip archive file can now be shipped around to be loaded by the unpickler ."""
if self . file is None : return # Close the pickle file . self . file . close ( ) self . file = None for f in self . mark_for_delete : error = [ False ] def register_error ( * args ) : error [ 0 ] = True _shutil . rmtree ( f , onerror = register_error ) if error [ 0 ] : _atexit . register ( _shutil . rmtree , f , ignore_errors = True )
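The deferred-cleanup pattern used for mark_for_delete, shown in isolation: attempt removal now, and if anything fails (for example, files still held open elsewhere), retry with ignore_errors at interpreter exit. A minimal sketch:

import atexit
import shutil

def remove_now_or_at_exit(path):
    failed = [False]

    def register_error(*args):
        failed[0] = True

    shutil.rmtree(path, onerror=register_error)
    if failed[0]:
        # Could not delete everything now; try again on shutdown.
        atexit.register(shutil.rmtree, path, ignore_errors=True)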
def get_vhost_names ( self ) : """A convenience function for getting back only the vhost names instead of the larger vhost dicts . : returns list vhost _ names : A list of just the vhost names ."""
vhosts = self . get_all_vhosts ( ) vhost_names = [ i [ 'name' ] for i in vhosts ] return vhost_names
def delete_file ( self , uri , purge = False ) : """Delete file . uri - - MediaFire file URI Keyword arguments : purge - - delete the file without sending it to Trash ."""
try : resource = self . get_resource_by_uri ( uri ) except ResourceNotFoundError : # Nothing to remove return None if not isinstance ( resource , File ) : raise ValueError ( "File expected, got {}" . format ( type ( resource ) ) ) if purge : func = self . api . file_purge else : func = self . api . file_delete return func ( resource [ 'quickkey' ] )
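Hypothetical usage, assuming a configured client instance; the URI scheme and paths below are made up for illustration:

# Move a file to Trash, then delete another outright.
client.delete_file('mf:/Documents/report.pdf')
client.delete_file('mf:/Documents/scratch.txt', purge=True)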
def winapi ( context , names ) : """Query Win32 API declarations . The Windows database must be prepared before using this command ."""
logging . info ( _ ( 'Entering winapi mode' ) ) sense = context . obj [ 'sense' ] none = True for name in names : code = sense . query_args ( name ) if code : none = False print ( stylify_code ( code ) ) else : logging . warning ( _ ( 'Function not found: %s' ) , name ) sys . exit ( 1 if none else 0 )
def _get ( self , * args , ** kwargs ) : """Wrapper around Requests for GET requests Returns : Response : A Requests Response object"""
if 'timeout' not in kwargs : kwargs [ 'timeout' ] = self . timeout req = self . session . get ( * args , ** kwargs ) return req
def _expand_to_beam_size ( data , beam_size , batch_size , state_info = None ) : """Tile all the states to have batch _ size * beam _ size on the batch axis . Parameters data : A single NDArray / Symbol or nested container with NDArrays / Symbol Each NDArray / Symbol should have shape ( N , . . . ) when state _ info is None , or same as the layout in state _ info when it ' s not None . beam _ size : int Beam size batch _ size : int Batch size state _ info : Nested structure of dictionary , default None . Descriptors for states , usually from decoder ' s ` ` state _ info ( ) ` ` . When None , this method assumes that the batch axis is the first dimension . Returns new _ states : Object that contains NDArrays / Symbols Each NDArray / Symbol should have shape batch _ size * beam _ size on the batch axis ."""
assert not state_info or isinstance ( state_info , ( type ( data ) , dict ) ) , 'data and state_info don\'t match, ' 'got: {} vs {}.' . format ( type ( state_info ) , type ( data ) ) if isinstance ( data , list ) : if not state_info : state_info = [ None ] * len ( data ) return [ _expand_to_beam_size ( d , beam_size , batch_size , s ) for d , s in zip ( data , state_info ) ] elif isinstance ( data , tuple ) : if not state_info : state_info = [ None ] * len ( data ) state_info = tuple ( state_info ) return tuple ( _expand_to_beam_size ( d , beam_size , batch_size , s ) for d , s in zip ( data , state_info ) ) elif isinstance ( data , dict ) : if not state_info : state_info = { k : None for k in data . keys ( ) } return { k : _expand_to_beam_size ( v , beam_size , batch_size , state_info [ k ] ) for k , v in data . items ( ) } elif isinstance ( data , mx . nd . NDArray ) : if not state_info : batch_axis = 0 else : batch_axis = state_info [ '__layout__' ] . find ( 'N' ) if data . shape [ batch_axis ] != batch_size : raise ValueError ( 'The batch dimension of all the inner elements in states must be ' '{}, found shape={}' . format ( batch_size , data . shape ) ) new_shape = list ( data . shape ) new_shape [ batch_axis ] = batch_size * beam_size new_shape = tuple ( new_shape ) return data . expand_dims ( batch_axis + 1 ) . broadcast_axes ( axis = batch_axis + 1 , size = beam_size ) . reshape ( new_shape ) elif isinstance ( data , mx . sym . Symbol ) : if not state_info : batch_axis = 0 else : batch_axis = state_info [ '__layout__' ] . find ( 'N' ) new_shape = ( 0 , ) * batch_axis + ( - 3 , - 2 ) return data . expand_dims ( batch_axis + 1 ) . broadcast_axes ( axis = batch_axis + 1 , size = beam_size ) . reshape ( new_shape ) else : raise NotImplementedError
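For the NDArray case with batch axis 0, the expand_dims/broadcast_axes/reshape chain repeats each batch row beam_size times in place; a NumPy sketch of the same effect:

import numpy as np

batch_size, beam_size = 2, 3
data = np.arange(batch_size * 4).reshape(batch_size, 4)

tiled = np.repeat(data, beam_size, axis=0)
assert tiled.shape == (batch_size * beam_size, 4)
# Rows 0..2 are copies of batch item 0, rows 3..5 of batch item 1.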
def _hasCredentials ( self ) : """Return True , if credentials is given"""
cred = self . options . get ( 'credentials' ) return ( cred and 'clientId' in cred and 'accessToken' in cred and cred [ 'clientId' ] and cred [ 'accessToken' ] )
def _add_path ( dir_name , payload_info_list ) : """Add a key with the path to each payload _ info _ dict ."""
for payload_info_dict in payload_info_list : file_name = payload_info_dict [ 'filename' ] or payload_info_dict [ 'pid' ] payload_info_dict [ 'path' ] = d1_common . utils . filesystem . gen_safe_path ( dir_name , 'data' , file_name )
def draw_to_notebook ( layers , ** kwargs ) : """Draws a network diagram in an IPython notebook : parameters : - layers : list or NeuralNet instance List of layers or the neural net to draw . - * * kwargs : see the docstring of make _ pydot _ graph for other options"""
from IPython . display import Image layers = ( layers . get_all_layers ( ) if hasattr ( layers , 'get_all_layers' ) else layers ) dot = make_pydot_graph ( layers , ** kwargs ) return Image ( dot . create_png ( ) )
def _hdparm ( args , failhard = True ) : '''Execute hdparm . Fail hard when required ; return output when possible .'''
cmd = 'hdparm {0}' . format ( args ) result = __salt__ [ 'cmd.run_all' ] ( cmd ) if result [ 'retcode' ] != 0 : msg = '{0}: {1}' . format ( cmd , result [ 'stderr' ] ) if failhard : raise CommandExecutionError ( msg ) else : log . warning ( msg ) return result [ 'stdout' ]
def setDragData ( self , data , x = None , y = None ) : """Sets the drag data for this chart item to the given data . : param data | < QMimeData > | | None"""
self . _dragData [ ( x , y ) ] = data
async def _process_ack ( self , msg ) : """Receives acks for published messages via the _ STAN . acks subscription ."""
pub_ack = protocol . PubAck ( ) pub_ack . ParseFromString ( msg . data ) # Unblock pending acks queue if required . if not self . _pending_pub_acks_queue . empty ( ) : await self . _pending_pub_acks_queue . get ( ) try : cb = self . _pub_ack_map [ pub_ack . guid ] await cb ( pub_ack ) del self . _pub_ack_map [ pub_ack . guid ] except KeyError : # Just skip the pub ack return except : # TODO : Check for protocol error return
def construct_oauth_url ( self ) : """Constructs verifier OAuth URL"""
response = self . _requester ( requests . head , "{0}://{1}/" . format ( self . protocol , self . client . server ) , allow_redirects = False ) if response . is_redirect : server = response . headers [ 'location' ] else : server = response . url path = "oauth/authorize?oauth_token={token}" . format ( token = self . store [ "oauth-request-token" ] ) return "{server}{path}" . format ( server = server , path = path )
def find_water_flow ( self , world , water_path ) : """Find the flow direction for each cell in heightmap"""
# iterate through each cell for x in range ( world . width - 1 ) : for y in range ( world . height - 1 ) : # search around cell for a direction path = self . find_quick_path ( [ x , y ] , world ) if path : tx , ty = path flow_dir = [ tx - x , ty - y ] key = 0 for direction in DIR_NEIGHBORS_CENTER : if direction == flow_dir : water_path [ y , x ] = key key += 1
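The trailing loop encodes the flow direction as the index of the matching offset in DIR_NEIGHBORS_CENTER. A hypothetical illustration; the actual offsets and ordering in DIR_NEIGHBORS_CENTER may differ:

DIR_NEIGHBORS_CENTER = [[0, -1], [-1, 0], [1, 0], [0, 1]]  # assumed order

flow_dir = [1, 0]
for key, direction in enumerate(DIR_NEIGHBORS_CENTER):
    if direction == flow_dir:
        print(key)  # 2 -- the value stored in water_path[y, x]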
def gpg_stash_key ( appname , key_bin , config_dir = None , gpghome = None ) : """Store a key locally to our app keyring . Does NOT put it into a blockchain ID Return the key ID on success Return None on error"""
assert is_valid_appname ( appname ) key_bin = str ( key_bin ) assert len ( key_bin ) > 0 if gpghome is None : config_dir = get_config_dir ( config_dir ) keydir = make_gpg_home ( appname , config_dir = config_dir ) else : keydir = gpghome gpg = gnupg . GPG ( homedir = keydir ) res = gpg . import_keys ( key_bin ) try : assert res . count == 1 , "Failed to store key (%s)" % res except AssertionError as e : log . exception ( e ) log . error ( "Failed to store key to %s" % keydir ) log . debug ( "res: %s" % res . __dict__ ) log . debug ( "(%s)\n%s" % ( len ( key_bin ) , key_bin ) ) return None return res . fingerprints [ 0 ]
def draw ( self , ** kwargs ) : """Renders the rfecv curve ."""
# Compute the curves x = self . n_feature_subsets_ means = self . cv_scores_ . mean ( axis = 1 ) sigmas = self . cv_scores_ . std ( axis = 1 ) # Plot one standard deviation above and below the mean self . ax . fill_between ( x , means - sigmas , means + sigmas , alpha = 0.25 ) # Plot the curve self . ax . plot ( x , means , 'o-' ) # Plot the maximum number of features self . ax . axvline ( self . n_features_ , c = 'k' , ls = '--' , label = "n_features = {}\nscore = {:0.3f}" . format ( self . n_features_ , self . cv_scores_ . mean ( axis = 1 ) . max ( ) ) ) return self . ax
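The mean plus/minus one-sigma band drawn above is a generic matplotlib pattern; a standalone sketch with synthetic cross-validation scores:

import matplotlib.pyplot as plt
import numpy as np

x = np.arange(1, 6)
cv_scores = np.random.RandomState(0).rand(5, 3)  # 5 subsets, 3 folds
means, sigmas = cv_scores.mean(axis=1), cv_scores.std(axis=1)

fig, ax = plt.subplots()
ax.fill_between(x, means - sigmas, means + sigmas, alpha=0.25)
ax.plot(x, means, 'o-')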
def num_mutations ( self ) : ''': return : number of mutations in the container'''
self . _initialize ( ) res = super ( Container , self ) . num_mutations ( ) return res
def _special_dbkey_maps ( dbkey , ref_file ) : """Avoid duplicate VEP information for databases with chromosome differences like hg19 / GRCh37."""
remaps = { "hg19" : "GRCh37" , "hg38-noalt" : "hg38" } if dbkey in remaps : base_dir = os . path . normpath ( os . path . join ( os . path . dirname ( ref_file ) , os . pardir ) ) vep_dir = os . path . normpath ( os . path . join ( base_dir , "vep" ) ) other_dir = os . path . relpath ( os . path . normpath ( os . path . join ( base_dir , os . pardir , remaps [ dbkey ] , "vep" ) ) , base_dir ) if os . path . exists ( os . path . join ( base_dir , other_dir ) ) : if not os . path . lexists ( vep_dir ) : os . symlink ( other_dir , vep_dir ) return vep_dir else : return None else : return None
def set_default_proxy ( cls , value ) : """Default : None ( no proxy ) A string specifying a proxy server through which every request must be sent . Use the scheme : / / hostname : port form . To proxy individual requests instead , pass the proxies argument to any request method ."""
if value is None : cls . DEFAULT_PROXY = None else : scheme , host , port = get_hostname_parameters_from_url ( value ) cls . DEFAULT_PROXY = "%s://%s:%s" % ( scheme , host , port )
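A rough equivalent of the normalisation, using the standard library in place of the get_hostname_parameters_from_url helper (which may additionally fill in default schemes or ports that this sketch does not):

from urllib.parse import urlsplit

parts = urlsplit('http://proxy.example.com:3128')
default_proxy = '%s://%s:%s' % (parts.scheme, parts.hostname, parts.port)
# 'http://proxy.example.com:3128'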
def parse_magnitude ( self , magnitude_str ) : """Converts magnitude field to a float value , or ` ` None ` ` if GCVS does not list the magnitude . Returns a tuple ( magnitude , symbol ) , where symbol can be either an empty string or a single character - one of ' < ' , ' > ' , ' ( ' ."""
symbol = magnitude_str [ 0 ] . strip ( ) magnitude = magnitude_str [ 1 : 6 ] . strip ( ) return float ( magnitude ) if magnitude else None , symbol
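Given the fixed-column layout (symbol in column 0, magnitude in columns 1-5), the method behaves as follows; calls are shown unbound for brevity since parse_magnitude takes self:

parse_magnitude(' 11.2 ')   # -> (11.2, '')
parse_magnitude('<11.2 ')   # -> (11.2, '<')
parse_magnitude('      ')   # -> (None, '')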