def init_from_wave_file(wavpath):
    """Init a sonic visualiser environment structure based on the analysis
    of the main audio file. The audio file has to be encoded in wave.

    Args:
        wavpath (str): the full path to the wav file
    """
    try:
        samplerate, data = SW.read(wavpath)
        nframes = data.shape[0]
    except Exception:
        # scipy cannot handle 24 bit wav files
        # and wave cannot handle 32 bit wav files
        try:
            w = wave.open(wavpath)
            samplerate = w.getframerate()
            nframes = w.getnframes()
        except Exception:
            raise Exception('Cannot decode wavefile ' + wavpath)
    return SVEnv(samplerate, nframes, wavpath)
def _format_exception_message(e):
    """Formats the specified exception."""
    # Prevent duplication of "AppError" in places that print "AppError"
    # and then this formatted string
    if isinstance(e, dxpy.AppError):
        return _safe_unicode(e)
    if USING_PYTHON2:
        return unicode(e.__class__.__name__, 'utf-8') + ": " + _safe_unicode(e)
    else:
        return e.__class__.__name__ + ": " + _safe_unicode(e)
def clean_scope(self):
    """The scope is assembled by combining all the set flags into a single
    integer value which we can later check again for set bits.

    If *no* scope is set, we return the default scope which is the first
    defined scope in :attr:`provider.constants.SCOPES`.
    """
    default = SCOPES[0][0]
    flags = self.cleaned_data.get('scope', [])
    return scope.to_int(default=default, *flags)
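A minimal sketch of what combining scope flags into one integer means, using plain bitwise ORs. The flag constants and the `to_int` helper below are hypothetical stand-ins for the provider's own `scope` module, whose real implementation may differ:

    # Hypothetical illustration: combine scope bit flags into one integer.
    READ, WRITE, READ_WRITE = 1 << 1, 1 << 2, (1 << 1) | (1 << 2)

    def to_int(default=READ, *flags):
        # OR together all requested flags; fall back to the default if none given.
        combined = 0
        for flag in flags:
            combined |= flag
        return combined or default

    assert to_int() == READ
    assert to_int(READ, WRITE) == WRITE
    assert to_int(READ, READ, WRITE) == READ_WRITE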
def chmod_native(path, mode_expression, recursive=False):
    """This is ugly and will only work on POSIX, but the built-in Python
    os.chmod support is very minimal, and neither supports fast recursive
    chmod nor "+X" type expressions, both of which are slow for large trees.
    So just shell out.
    """
    popenargs = ["chmod"]
    if recursive:
        popenargs.append("-R")
    popenargs.append(mode_expression)
    popenargs.append(path)
    subprocess.check_call(popenargs)
def get_content_type(self):
    """Returns the Content Type to serve from either the extension or the
    Accept headers. Uses the :attr:`EXTENSION_MAP` list for all the
    configured MIME types.
    """
    extension = self.path_params.get('_extension')
    for ext, mime in self.EXTENSION_MAP:
        if ext == extension:
            return mime

    # Else: use the Accept headers
    if self.response.vary is None:
        self.response.vary = ['Accept']
    else:
        self.response.vary.append('Accept')

    types = [mime for ext, mime in self.EXTENSION_MAP]
    ct = self.request.accept.best_match(types)

    # No best match found. The specification allows us to either return a
    # 406 or just use another format in this case.
    # We pick the default format, though that may become a configurable
    # behavior in the future.
    if not ct:
        ct = types[0]
    return ct
def install_os_snaps(snaps, refresh=False):
    """Install OpenStack snaps from channel and with mode

    @param snaps: Dictionary of snaps with channels and modes of the form:
        {'snap_name': {'channel': 'snap_channel',
                       'mode': 'snap_mode'}}
        Where channel is a snapstore channel and mode is --classic,
        --devmode or --jailmode.
    @param refresh: If True, refresh already installed snaps instead of
        installing them.
    """
    def _ensure_flag(flag):
        if flag.startswith('--'):
            return flag
        return '--{}'.format(flag)

    if refresh:
        for snap in snaps.keys():
            snap_refresh(snap,
                         _ensure_flag(snaps[snap]['channel']),
                         _ensure_flag(snaps[snap]['mode']))
    else:
        for snap in snaps.keys():
            snap_install(snap,
                         _ensure_flag(snaps[snap]['channel']),
                         _ensure_flag(snaps[snap]['mode']))
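A call might look like the following; the snap names and channels are illustrative only, and `snap_install`/`snap_refresh` are the helpers assumed by the function above:

    # Hypothetical example: install two snaps from specific channels.
    snaps = {
        'openstackclients': {'channel': 'latest/stable', 'mode': 'classic'},
        'vault': {'channel': '1.8/stable', 'mode': 'jailmode'},
    }
    install_os_snaps(snaps)                # fresh install
    install_os_snaps(snaps, refresh=True)  # or refresh if already installed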
def to_dict(self):
    """Get a dictionary of the attributes of this Language object, which
    can be useful for constructing a similar object.
    """
    if self._dict is not None:
        return self._dict
    result = {}
    for key in self.ATTRIBUTES:
        value = getattr(self, key)
        if value:
            result[key] = value
    self._dict = result
    return result
def wet_records_from_file_obj(f, take_ownership=False):
    """Iterate through records in WET file object."""
    while True:
        record = WETRecord.read(f)

        if record is None:
            break

        if not record.url:
            continue

        yield record

    if take_ownership:
        f.close()
def get_object(self, *args, **kwargs):
    """Should memoize the object to avoid multiple queries if get_object
    is used many times in the view.
    """
    self.category_instance = get_object_or_404(
        Category, slug=self.kwargs['category_slug'])
    return get_object_or_404(
        Post,
        thread__id=self.kwargs['thread_id'],
        thread__category=self.category_instance,
        pk=self.kwargs['post_id'])
def predict(self, timeseriesX, n, m):
    """Calculates the dependent timeseries Y for the given parameters and
    independent timeseries. (y = m * x + n)

    :param TimeSeries timeseriesX: the independent TimeSeries.
    :param float n: The intercept with the y axis that has been calculated
        during regression.
    :param float m: The slope of the function that has been calculated
        during regression.

    :return TimeSeries timeseries_y: the predicted values for the dependent
        TimeSeries. Its length and first dimension will equal timeseriesX.
    """
    new_entries = []
    for entry in timeseriesX:
        predicted_value = m * entry[1] + n
        new_entries.append([entry[0], predicted_value])
    return TimeSeries.from_twodim_list(new_entries)
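The same prediction can be sketched with plain lists of `(timestamp, value)` pairs, independent of the `TimeSeries` class used above:

    # Minimal illustration of y = m * x + n applied to (timestamp, value) pairs.
    m, n = 2.0, 1.0
    series_x = [(0, 1.0), (1, 2.0), (2, 3.0)]
    series_y = [(t, m * x + n) for t, x in series_x]
    print(series_y)  # [(0, 3.0), (1, 5.0), (2, 7.0)]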
def update_translation(self, context_id, translation_id, static_ip=None,
                       remote_ip=None, notes=None):
    """Updates an address translation entry using the given values.

    :param int context_id: The id-value representing the context instance.
    :param int translation_id: The id-value representing the translation
        entry to update.
    :param string static_ip: The static IP address value to update.
    :param string remote_ip: The remote IP address value to update.
    :param string notes: The notes value to update.
    :return bool: True if the update was successful.
    """
    translation = self.get_translation(context_id, translation_id)

    if static_ip is not None:
        translation['internalIpAddress'] = static_ip
        translation.pop('internalIpAddressId', None)
    if remote_ip is not None:
        translation['customerIpAddress'] = remote_ip
        translation.pop('customerIpAddressId', None)
    if notes is not None:
        translation['notes'] = notes

    self.context.editAddressTranslation(translation, id=context_id)
    return True
def repack_archive(archive, archive_new, verbosity=0, interactive=True):
    """Repack archive to different file and/or format."""
    util.check_existing_filename(archive)
    util.check_new_filename(archive_new)
    if verbosity >= 0:
        util.log_info("Repacking %s to %s ..." % (archive, archive_new))
    res = _repack_archive(archive, archive_new, verbosity=verbosity,
                          interactive=interactive)
    if verbosity >= 0:
        util.log_info("... repacking successful.")
    return res
def get_extended_summaryf(self, *args, **kwargs):
    """Extract the extended summary from a function docstring

    This function can be used as a decorator to extract the extended
    summary of a function docstring (similar to :meth:`get_sectionsf`).

    Parameters
    ----------
    ``*args`` and ``**kwargs``
        See the :meth:`get_extended_summary` method. Note that the first
        argument will be the docstring of the specified function.

    Returns
    -------
    function
        Wrapper that takes a function as input and registers its summary
        via the :meth:`get_extended_summary` method.
    """
    def func(f):
        doc = f.__doc__
        self.get_extended_summary(doc or '', *args, **kwargs)
        return f
    return func
def get_user_home(self, user):
    """Returns the default URL for a particular user.

    This method can be used to customize where a user is sent when they
    log in, etc. By default it returns the value of
    :meth:`get_absolute_url`.

    An alternative function can be supplied to customize this behavior by
    specifying either a URL or a function which returns a URL via the
    ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these would be
    valid::

        {"user_home": "/home",}  # A URL
        {"user_home": "my_module.get_user_home",}  # Path to a function
        {"user_home": lambda user: "/" + user.name,}  # A function
        {"user_home": None,}  # Will always return the default dashboard

    This can be useful if the default dashboard may not be accessible to
    all users. When user_home is missing from HORIZON_CONFIG, it will
    default to the settings.LOGIN_REDIRECT_URL value.
    """
    user_home = self._conf['user_home']
    if user_home:
        if callable(user_home):
            return user_home(user)
        elif isinstance(user_home, six.string_types):
            # Assume we've got a URL if there's a slash in it
            if '/' in user_home:
                return user_home
            else:
                mod, func = user_home.rsplit(".", 1)
                return getattr(import_module(mod), func)(user)
        # If it's not callable and not a string, it's wrong.
        raise ValueError('The user_home setting must be either a string '
                         'or a callable object (e.g. a function).')
    else:
        return self.get_absolute_url()
def docgen():
    """Build documentation."""
    hitchpylibrarytoolkit.docgen(
        _storybook({}),
        DIR.project,
        DIR.key / "story",
        DIR.gen,
    )
def status(self):
    """Collects the instances state and returns a list.

    .. important::

        Molecule assumes all instances were created successfully by
        Ansible, otherwise Ansible would return an error on create. This
        may prove to be a bad assumption. However, configuring Molecule's
        driver to match the options passed to the playbook may prove
        difficult. Especially in cases where the user is provisioning
        instances off localhost.

    :returns: list
    """
    status_list = []
    for platform in self._config.platforms.instances:
        instance_name = platform['name']
        driver_name = self.name
        provisioner_name = self._config.provisioner.name
        scenario_name = self._config.scenario.name

        status_list.append(
            Status(
                instance_name=instance_name,
                driver_name=driver_name,
                provisioner_name=provisioner_name,
                scenario_name=scenario_name,
                created=self._created(),
                converged=self._converged(),
            ))

    return status_list
def outbound_message_filter(f):
    """Register the decorated function as a service-level outbound message
    filter.

    :raise TypeError: if the decorated object is a coroutine function.

    .. seealso::

        :class:`StanzaStream` for important remarks regarding the use of
        stanza filters.
    """
    if asyncio.iscoroutinefunction(f):
        raise TypeError(
            "outbound_message_filter must not be a coroutine function"
        )

    add_handler_spec(
        f,
        HandlerSpec((_apply_outbound_message_filter, ())),
    )
    return f
def _from_dict(cls, _dict):
    """Initialize a ToneCategory object from a json dictionary."""
    args = {}
    if 'tones' in _dict:
        args['tones'] = [ToneScore._from_dict(x) for x in (_dict.get('tones'))]
    else:
        raise ValueError(
            'Required property \'tones\' not present in ToneCategory JSON')
    if 'category_id' in _dict:
        args['category_id'] = _dict.get('category_id')
    else:
        raise ValueError(
            'Required property \'category_id\' not present in ToneCategory JSON')
    if 'category_name' in _dict:
        args['category_name'] = _dict.get('category_name')
    else:
        raise ValueError(
            'Required property \'category_name\' not present in ToneCategory JSON')
    return cls(**args)
def ResolveHostnameToIP(host, port):
    """Resolves a hostname to an IP address."""
    ip_addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC, 0,
                                  socket.IPPROTO_TCP)
    # getaddrinfo returns tuples (family, socktype, proto, canonname, sockaddr).
    # We are interested in sockaddr which is in turn a tuple
    # (address, port) for IPv4 or (address, port, flow info, scope id)
    # for IPv6. In both cases, we want the first element, the address.
    result = ip_addrs[0][4][0]
    # TODO: In Python 2, this value is a byte string instead of UTF-8
    # string. To ensure type correctness until support for Python 2 is
    # dropped, we always decode this value.
    if compatibility.PY2:
        result = result.decode("ascii")
    return result
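For reference, a quick standalone check of the tuple shape `socket.getaddrinfo` returns; the exact address depends on the local resolver:

    import socket

    infos = socket.getaddrinfo("localhost", 80, socket.AF_UNSPEC, 0,
                               socket.IPPROTO_TCP)
    family, socktype, proto, canonname, sockaddr = infos[0]
    print(sockaddr[0])  # e.g. '127.0.0.1' or '::1', depending on the system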
def scalar(name, data, step=None, description=None):
    """Write a scalar summary.

    Arguments:
      name: A name for this summary. The summary tag used for TensorBoard
        will be this name prefixed by any active name scopes.
      data: A real numeric scalar value, convertible to a `float32` Tensor.
      step: Explicit `int64`-castable monotonic step value for this summary.
        If omitted, this defaults to `tf.summary.experimental.get_step()`,
        which must not be None.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.

    Returns:
      True on success, or false if no summary was written because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    summary_scope = (
        getattr(tf.summary.experimental, 'summary_scope', None) or
        tf.summary.summary_scope)
    with summary_scope(name, 'scalar_summary', values=[data, step]) as (tag, _):
        tf.debugging.assert_scalar(data)
        return tf.summary.write(
            tag=tag,
            tensor=tf.cast(data, tf.float32),
            step=step,
            metadata=summary_metadata)
def _open_sheet(self):
    """Read the sheet, get the header values, and get the number of
    columns and rows.

    :return:
    """
    if self.sheet_name and not self.header:
        self._sheet = self._file.worksheet(self.sheet_name.title)
        self.ncols = self._sheet.col_count
        self.nrows = self._sheet.row_count
        for i in range(1, self.ncols + 1):
            self.header = self.header + [self._sheet.cell(1, i).value]
def deploy(self, environment, target_name, stream_output=None):
    """Return True if deployment was successful"""
    try:
        remote_server_command(
            ["rsync", "-lrv", "--safe-links", "--munge-links", "--delete",
             "--inplace", "--chmod=ugo=rwX", "--exclude=.datacats-environment",
             "--exclude=.git",
             "/project/.",
             environment.deploy_target + ':' + target_name],
            environment, self,
            include_project_dir=True,
            stream_output=stream_output,
            clean_up=True,
        )
    except WebCommandError as e:
        raise DatacatsError(
            "Unable to deploy `{0}` to remote server for some reason:\n"
            " datacats was not able to copy data to the remote server"
            .format(target_name),
            parent_exception=e
        )

    try:
        remote_server_command(
            ["ssh", environment.deploy_target, "install", target_name],
            environment, self,
            clean_up=True,
        )
        return True
    except WebCommandError as e:
        raise DatacatsError(
            "Unable to deploy `{0}` to remote server for some reason:\n"
            "datacats copied data to the server but failed to register\n"
            "(or `install`) the new catalog"
            .format(target_name),
            parent_exception=e
        )
def _agg_bake(cls, vertices, color, closed=False):
    """Bake a list of 2D vertices for rendering them as thick line. Each
    line segment must have its own vertices because of antialias (this
    means no vertex sharing between two adjacent line segments).
    """
    n = len(vertices)
    P = np.array(vertices).reshape(n, 2).astype(float)
    idx = np.arange(n)  # used to eventually tile the color array

    dx, dy = P[0] - P[-1]
    d = np.sqrt(dx * dx + dy * dy)

    # If closed, make sure first vertex == last vertex (+/- epsilon=1e-10)
    if closed and d > 1e-10:
        P = np.append(P, P[0]).reshape(n + 1, 2)
        idx = np.append(idx, idx[-1])
        n += 1

    V = np.zeros(len(P), dtype=cls._agg_vtype)
    V['a_position'] = P

    # Tangents & norms
    T = P[1:] - P[:-1]

    N = np.sqrt(T[:, 0] ** 2 + T[:, 1] ** 2)
    # T /= N.reshape(len(T), 1)
    V['a_tangents'][+1:, :2] = T
    V['a_tangents'][0, :2] = T[-1] if closed else T[0]
    V['a_tangents'][:-1, 2:] = T
    V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]

    # Angles
    T1 = V['a_tangents'][:, :2]
    T2 = V['a_tangents'][:, 2:]
    A = np.arctan2(T1[:, 0] * T2[:, 1] - T1[:, 1] * T2[:, 0],
                   T1[:, 0] * T2[:, 0] + T1[:, 1] * T2[:, 1])
    V['a_angles'][:-1, 0] = A[:-1]
    V['a_angles'][:-1, 1] = A[+1:]

    # Segment
    L = np.cumsum(N)
    V['a_segment'][+1:, 0] = L
    V['a_segment'][:-1, 1] = L
    # V['a_lengths'][:, 2] = L[-1]

    # Step 1: A -- B -- C  =>  A -- B, B' -- C
    V = np.repeat(V, 2, axis=0)[1:-1]
    V['a_segment'][1:] = V['a_segment'][:-1]
    V['a_angles'][1:] = V['a_angles'][:-1]
    V['a_texcoord'][0::2] = -1
    V['a_texcoord'][1::2] = +1
    idx = np.repeat(idx, 2)[1:-1]

    # Step 2: A -- B, B' -- C  ->  A0/A1 -- B0/B1, B'0/B'1 -- C0/C1
    V = np.repeat(V, 2, axis=0)
    V['a_texcoord'][0::2, 1] = -1
    V['a_texcoord'][1::2, 1] = +1
    idx = np.repeat(idx, 2)

    I = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
                  (n - 1) * (2 * 3))
    I += np.repeat(4 * np.arange(n - 1, dtype=np.uint32), 6)

    # Length
    V['alength'] = L[-1] * np.ones(len(V))

    # Color
    if color.ndim == 1:
        color = np.tile(color, (len(V), 1))
    elif color.ndim == 2 and len(color) == n:
        color = color[idx]
    else:
        raise ValueError('Color length %s does not match number of '
                         'vertices %s' % (len(color), n))
    V['color'] = color

    return V, I
def get_filenames(root, prefix=u'', suffix=u''):
    """Function for listing filenames with given prefix and suffix in the
    root directory.

    Parameters
    ----------
    prefix : str
        The prefix of the required files.
    suffix : str
        The suffix of the required files.

    Returns
    -------
    list of str
        List of filenames matching the prefix and suffix criteria.
    """
    return [fnm for fnm in os.listdir(root)
            if fnm.startswith(prefix) and fnm.endswith(suffix)]
def _init_structures(self, data, subjects):
    """Initializes data structures for SRM and preprocess the data.

    Parameters
    ----------
    data : list of 2D arrays, element i has shape=[voxels_i, samples]
        Each element in the list contains the fMRI data of one subject.
    subjects : int
        The total number of subjects in `data`.

    Returns
    -------
    x : list of array, element i has shape=[voxels_i, samples]
        Demeaned data for each subject.
    mu : list of array, element i has shape=[voxels_i]
        Voxel means over samples, per subject.
    rho2 : array, shape=[subjects]
        Noise variance :math:`\\rho^2` per subject.
    trace_xtx : array, shape=[subjects]
        The squared Frobenius norm of the demeaned data in `x`.
    """
    x = []
    mu = []
    rho2 = np.zeros(subjects)

    trace_xtx = np.zeros(subjects)
    for subject in range(subjects):
        mu.append(np.mean(data[subject], 1))
        rho2[subject] = 1
        trace_xtx[subject] = np.sum(data[subject] ** 2)
        x.append(data[subject] - mu[subject][:, np.newaxis])

    return x, mu, rho2, trace_xtx
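The per-subject preprocessing above is a column-mean removal plus a squared-Frobenius-norm bookkeeping term; a standalone NumPy illustration with toy data:

    import numpy as np

    data_subject = np.random.rand(10, 50)      # 10 voxels, 50 samples (toy data)
    mu = np.mean(data_subject, axis=1)         # voxel means over samples
    demeaned = data_subject - mu[:, np.newaxis]
    trace_xtx = np.sum(data_subject ** 2)      # squared Frobenius norm
    print(demeaned.shape, round(trace_xtx, 2))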
def nan_empty(self, col: str):
    """Fill empty values with NaN values

    :param col: name of the column
    :type col: str

    :example: ``ds.nan_empty("mycol")``
    """
    try:
        self.df[col] = self.df[col].replace('', nan)
        self.ok("Filled empty values with nan in column " + col)
    except Exception as e:
        self.err(e, "Can not fill empty values with nan")
def args(self):
    """Parse args if they have not already been parsed and return the
    Namespace for args.

    .. Note:: Accessing args should only be done directly in the App.

    Returns:
        (namespace): ArgParser parsed arguments.
    """
    if not self._parsed:  # only resolve once
        self._default_args, unknown = self.parser.parse_known_args()

        # when running locally retrieve any args from the results.tc file.
        # when running in platform this is done automatically.
        self._results_tc_args()

        # log unknown arguments only once
        self._unknown_args(unknown)

        # set parsed bool to ensure args are only parsed once
        self._parsed = True

        # update args with value from config data or configuration file
        self.args_update()

    return self._default_args
def _mean_image_subtraction(image, means, num_channels):
    """Subtracts the given means from each image channel.

    For example:
        means = [123.68, 116.779, 103.939]
        image = _mean_image_subtraction(image, means)

    Note that the rank of `image` must be known.

    Args:
      image: a tensor of size [height, width, C].
      means: a C-vector of values to subtract from each channel.
      num_channels: number of color channels in the image that will be
        distorted.

    Returns:
      the centered image.

    Raises:
      ValueError: If the rank of `image` is unknown, if `image` has a rank
        other than three or if the number of channels in `image` doesn't
        match the number of values in `means`.
    """
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')

    mlperf_log.resnet_print(key=mlperf_log.INPUT_MEAN_SUBTRACTION,
                            value=means)

    # We have a 1-D tensor of means; convert to 3-D.
    means = tf.expand_dims(tf.expand_dims(means, 0), 0)

    return image - means
def lF_oneway(*lists):
    """Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups. From Heiman, pp. 394-7.

    Usage:   F_oneway(*lists)   where *lists is any number of lists, one per
                                treatment group
    Returns: F value, one-tailed p-value
    """
    a = len(lists)  # ANOVA on 'a' groups, each in its own list
    means = [0] * a
    vars = [0] * a
    ns = [0] * a
    alldata = []
    tmp = [N.array(_) for _ in lists]
    means = [amean(_) for _ in tmp]
    vars = [avar(_) for _ in tmp]
    ns = [len(_) for _ in lists]
    for i in range(len(lists)):
        alldata = alldata + lists[i]
    alldata = N.array(alldata)
    bign = len(alldata)
    sstot = ass(alldata) - (asquare_of_sums(alldata) / float(bign))
    ssbn = 0
    for list in lists:
        ssbn = ssbn + asquare_of_sums(N.array(list)) / float(len(list))
    ssbn = ssbn - (asquare_of_sums(alldata) / float(bign))
    sswn = sstot - ssbn
    dfbn = a - 1
    dfwn = bign - a
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = fprob(dfbn, dfwn, f)
    return f, prob
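The same computation can be cross-checked against SciPy's one-way ANOVA, which implements the identical between/within sum-of-squares decomposition (the group values here are made up):

    from scipy import stats

    g1 = [86, 79, 81, 70, 84]
    g2 = [71, 77, 68, 61, 57]
    g3 = [59, 65, 56, 58, 55]
    f_val, p_val = stats.f_oneway(g1, g2, g3)
    print(round(f_val, 3), round(p_val, 5))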
def parameter_distribution(self, parameter, bp, bins=30, merge=False,
                           merge_method='mean', masked=False):
    """To get the parameter distribution of either a specific
    base-pair/step or a DNA segment over the MD simulation.

    Parameters
    ----------
    parameter : str
        Name of a base-pair or base-step or helical parameter.
        For details about accepted keywords, see ``parameter`` in the
        method :meth:`DNA.get_parameters`.

    bp : 1D list or array
        base-pairs to analyze
        Example: ::

            bp = [6]        # merge=False
            bp = [4,15]     # merge=True

    bins : int
        Number of bins to calculate histogram

    merge : bool
        ``Default=False``: As shown above, if ``True``, bp should be a list
        of range otherwise a list of single value. If ``merge=True``, the
        parameter for the respective DNA segment could be merged or
        calculated by ``merge_method``.

    merge_method : str
        Method to calculate the parameter of a DNA segment from local
        parameters of all base-pairs/steps that are between the range given
        through ``bp``. Currently accepted keywords are as follows:

            * ``merge_method = mean``: Average of local parameters
            * ``merge_method = sum``: Sum of local parameters

    masked : bool
        ``Default=False``. To skip specific frames/snapshots. ``DNA.mask``
        array should be set to use this functionality. This array contains
        boolean (either ``True`` or ``False``) value for each frame to mask
        the frames. Presently, mask array is automatically generated during
        :meth:`DNA.generate_smooth_axis` to skip those frames where 3D
        fitting curve was not successful within the given criteria.

    Returns
    -------
    values : 1D array
        Array containing parameter values
    density : 1D array
        Array containing density for respective parameter values
    """
    if not (isinstance(bp, list) or isinstance(bp, np.ndarray)):
        raise AssertionError("type %s is not list or np.ndarray" % type(bp))

    if (len(bp) > 1) and (merge == False):
        raise AssertionError(
            "bp %s contains more than two values, whereas merge=False. "
            "Use either one value in bp or merge=True" % bp)

    if len(bp) == 1:
        merge = False

    if (merge == True) and not ((merge_method == 'mean') or (merge_method == 'sum')):
        raise AssertionError("merge method %s is not available." % merge_method)

    if len(bp) == 1:
        param_value, bp_idx = self.get_parameters(
            parameter, bp, bp_range=False, masked=masked)
    else:
        param_value, bp_idx = self.get_parameters(
            parameter, bp, bp_range=True, masked=masked)

    if (merge == True) and (merge_method == 'mean'):
        param_value = np.mean(param_value, axis=0)
    elif (merge == True) and (merge_method == 'sum'):
        param_value = np.sum(param_value, axis=0)
    else:
        param_value = param_value[0]

    density, bin_edges = np.histogram(param_value, bins=bins, density=True)
    bin_width = bin_edges[1] - bin_edges[0]

    density = np.insert(density, 0, 0.0)
    density = np.append(density, 0.0)

    values = []
    for i in range(len(bin_edges) - 1):
        values.append((bin_edges[i] + bin_edges[i + 1]) / 2)

    values = np.asarray(values)
    values = np.append(values, values[-1] + bin_width)
    values = np.insert(values, 0, values[0] - bin_width)

    return np.array(values), density
async def find_movie(self, query):
    """Retrieve movie data by search query.

    Arguments:
      query (:py:class:`str`): Query to search for.

    Returns:
      :py:class:`list`: Possible matches.
    """
    params = OrderedDict([('query', query), ('include_adult', False)])
    url = self.url_builder('search/movie', {}, params)
    data = await self.get_data(url)
    if data is None:
        return
    return [
        Movie.from_json(item, self.config['data'].get('images'))
        for item in data.get('results', [])
    ]
def purview_state(self, direction):
    """The state of the purview when we are computing coefficients in
    ``direction``.

    For example, if we are computing the cause coefficient of a mechanism
    in ``after_state``, the direction is ``CAUSE`` and the
    ``purview_state`` is ``before_state``.
    """
    return {
        Direction.CAUSE: self.before_state,
        Direction.EFFECT: self.after_state,
    }[direction]
def _get_instance_repo(self, namespace):
    """Returns the instance repository for the specified CIM namespace
    within the mock repository. This is the original instance variable,
    so any modifications will change the mock repository.

    Validates that the namespace exists in the mock repository.

    If the instance repository does not contain the namespace yet, it is
    added.

    Parameters:

      namespace (:term:`string`): Namespace name. Must not be `None`.

    Returns:

      list of CIMInstance: Instance repository.

    Raises:

      :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
      not exist.
    """
    self._validate_namespace(namespace)
    if namespace not in self.instances:
        self.instances[namespace] = []
    return self.instances[namespace]
def mul(x, y, context=None):
    """Return ``x`` times ``y``."""
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_mul,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
def checkArgs(args):
    """Checks the arguments and options.

    :param args: an object containing the options of the program.
    :type args: argparse.Namespace

    :returns: ``True`` if everything was OK.

    If there is a problem with an option, an exception is raised using the
    :py:class:`ProgramError` class, a message is printed to the
    :class:`sys.stderr` and the program exits with code 1.
    """
    # Check if we have the tped and the tfam files
    required_file_extensions = {".tfam", ".tped"}
    if args.is_bfile:
        required_file_extensions = {".bed", ".bim", ".fam"}
    for fileName in [args.ifile + i for i in required_file_extensions]:
        if not os.path.isfile(fileName):
            msg = "{}: no such file".format(fileName)
            raise ProgramError(msg)

    # Check the mind option (between 0 and 1, inclusive)
    if (args.mind < 0) or (args.mind > 1):
        msg = "mind=%f: must be between 0 and 1 (inclusive)" % args.mind
        raise ProgramError(msg)

    return True
def disable_component(self, component):
    """Force a component to be disabled.

    :param component: can be a class or an instance.
    """
    if not isinstance(component, type):
        component = component.__class__
    self.enabled[component] = False
    self.components[component] = None
def get_ancestors(self, limit: int, header: BlockHeader) -> Tuple[BaseBlock, ...]:
    """Return `limit` number of ancestor blocks from the current canonical head."""
    ancestor_count = min(header.block_number, limit)

    # We construct a temporary block object
    vm_class = self.get_vm_class_for_block_number(header.block_number)
    block_class = vm_class.get_block_class()
    block = block_class(header=header, uncles=[])

    ancestor_generator = iterate(
        compose(
            self.get_block_by_hash,
            operator.attrgetter('parent_hash'),
            operator.attrgetter('header'),
        ),
        block,
    )
    # we peel off the first element from the iterator which will be the
    # temporary block object we constructed.
    next(ancestor_generator)

    return tuple(take(ancestor_count, ancestor_generator))
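A quick standalone illustration of the `iterate`/`compose`/`take` combination used above, with plain integers standing in for blocks (these helpers behave the same in `toolz` and `cytoolz`):

    from toolz import compose, iterate, take

    # Walk "parents" of a number by repeatedly applying a composed step function.
    step = compose(lambda x: x - 1, abs)   # toy stand-in for header -> parent lookup
    chain = iterate(step, 5)               # yields 5, 4, 3, 2, ...
    next(chain)                            # peel off the starting element, as above
    print(list(take(3, chain)))            # [4, 3, 2]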
def read(self, size=-1):
    """Read up to size uncompressed bytes from the file.

    If size is negative or omitted, read until EOF is reached.
    Returns b"" if the file is already at EOF.
    """
    self._check_can_read()
    if size is None:
        # This is not needed on Python 3 where the comparison to zero
        # will fail with a TypeError.
        raise TypeError("Read size should be an integer, not None")
    if self._mode == _MODE_READ_EOF or size == 0:
        return b""
    elif size < 0:
        return self._read_all()
    else:
        return self._read_block(size)
def hyperparameters(self):
    """Return hyperparameters used by your custom TensorFlow code during model training."""
    hyperparameters = super(RLEstimator, self).hyperparameters()

    additional_hyperparameters = {
        SAGEMAKER_OUTPUT_LOCATION: self.output_path,
        # TODO: can be applied to all other estimators
        SAGEMAKER_ESTIMATOR: SAGEMAKER_ESTIMATOR_VALUE,
    }

    hyperparameters.update(
        Framework._json_encode_hyperparameters(additional_hyperparameters))
    return hyperparameters
def wrap_udf(hdfs_file, inputs, output, so_symbol, name=None):
    """Creates a callable scalar function object. Must be created in Impala
    to be used.

    Parameters
    ----------
    hdfs_file : .so file that contains relevant UDF
    inputs : list of strings or sig.TypeSignature
        Input types to UDF
    output : string
        Ibis data type
    so_symbol : string, C++ function name for relevant UDF
    name : string (optional). Used internally to track function

    Returns
    -------
    container : UDF object
    """
    func = ImpalaUDF(inputs, output, so_symbol, name=name,
                     lib_path=hdfs_file)
    return func
def set_tag(self, tag):
    '''Sets the tag. If the Entity belongs to the world it will check for
    tag conflicts.
    '''
    if self._world:
        if self._world.get_entity_by_tag(tag):
            raise NonUniqueTagError(tag)
    self._tag = tag
def pov(self, color: chess.Color) -> "Score":
    """Get the score from the point of view of the given *color*."""
    return self.relative if self.turn == color else -self.relative
def availability_sets_list_available_sizes(name, resource_group, **kwargs):  # pylint: disable=invalid-name
    '''
    .. versionadded:: 2019.2.0

    List all available virtual machine sizes that can be used to create a
    new virtual machine in an existing availability set.

    :param name: The availability set name to list available
        virtual machine sizes within.

    :param resource_group: The resource group name to list available
        availability set sizes within.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_compute.availability_sets_list_available_sizes testset testgroup
    '''
    result = {}
    compconn = __utils__['azurearm.get_client']('compute', **kwargs)
    try:
        sizes = __utils__['azurearm.paged_object_to_list'](
            compconn.availability_sets.list_available_sizes(
                resource_group_name=resource_group,
                availability_set_name=name
            )
        )
        for size in sizes:
            result[size['name']] = size
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
        result = {'error': str(exc)}

    return result
def exists_orm(session: Session,
               ormclass: DeclarativeMeta,
               *criteria: Any) -> bool:
    """Detects whether a database record exists for the specified
    ``ormclass`` and ``criteria``.

    Example usage:

    .. code-block:: python

        bool_exists = exists_orm(session, MyClass, MyClass.myfield == value)
    """
    # http://docs.sqlalchemy.org/en/latest/orm/query.html
    q = session.query(ormclass)
    for criterion in criteria:
        q = q.filter(criterion)
    exists_clause = q.exists()
    return bool_from_exists_clause(session=session,
                                   exists_clause=exists_clause)
def add_user_invitation(self, **kwargs):
    """Add a UserInvitation object, with properties specified in ``**kwargs``."""
    user_invitation = self.UserInvitationClass(**kwargs)
    self.db_adapter.add_object(user_invitation)
    return user_invitation
async def close(self) -> None:
    """Complete queued queries/cursors and close the connection."""
    await self._execute(self._conn.close)
    self._running = False
    self._connection = None
def _convert(self, value):
    """Returns a PasswordHash from the given string.

    PasswordHash instances or None values will return unchanged.
    Strings will be hashed and the resulting PasswordHash returned.
    Any other input will result in a TypeError.
    """
    if isinstance(value, PasswordHash):
        return value
    elif isinstance(value, str):
        value = value.encode('utf-8')
        return PasswordHash.new(value, self.rounds)
    elif value is not None:
        raise TypeError(
            'Cannot convert {} to a PasswordHash'.format(type(value)))
def takeexactly(iterable, size):
    """Yield blocks from `iterable` until exactly `size` bytes have been
    returned.

    Args:
        iterable (iterable): Any iterable that yields sliceable objects that
                             have length.
        size (int): How much data to consume

    Yields:
        blocks from `iterable` such that
        sum(len(block) for block in takeexactly(iterable, size)) == size

    Raises:
        ValueError if there is less than `size` data in `iterable`
    """
    total = 0
    for block in iterable:
        n = min(len(block), size - total)
        block = block[:n]
        if block:
            yield block
            total += len(block)
        if total >= size:
            break
    if total < size:
        raise ValueError('not enough data (yielded {} of {})'
                         .format(total, size))
    # sanity check; this should never happen
    if total != size:  # pragma: no cover
        raise ValueError('yielded too much data')
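Assuming `takeexactly` is importable as defined above, a quick check of its contract shows the final block being trimmed so the total is exact:

    blocks = [b'abc', b'def', b'ghi']
    out = list(takeexactly(iter(blocks), 5))
    print(out)                          # [b'abc', b'de']
    print(sum(len(b) for b in out))     # 5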
def tree():
    """Return a tree of tuples representing the logger layout.

    Each tuple looks like ``('logger-name', <Logger>, [...])`` where the
    third element is a list of zero or more child tuples that share the
    same layout.
    """
    root = ('', logging.root, [])
    nodes = {}
    items = list(logging.root.manager.loggerDict.items())  # for Python 2 and 3
    items.sort()
    for name, logger in items:
        nodes[name] = node = (name, logger, [])
        i = name.rfind('.', 0, len(name) - 1)  # same formula used in `logging`
        if i == -1:
            parent = root
        else:
            parent = nodes[name[:i]]
        parent[2].append(node)
    return root
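Assuming the `tree()` helper above is importable, printing the hierarchy is a small recursion over the `(name, logger, children)` tuples; the exact output depends on which loggers the process has created:

    import logging

    logging.getLogger('myapp.db')       # create a couple of example loggers
    logging.getLogger('myapp.web')

    def print_tree(node, depth=0):
        name, logger, children = node
        print('    ' * depth + (name or '<root>'))
        for child in children:
            print_tree(child, depth + 1)

    print_tree(tree())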
def update_remote_ids(self, remote_folder):
    """Set remote id based on remote_folder and check children against
    this folder's children.

    :param remote_folder: RemoteFolder to compare against
    """
    self.remote_id = remote_folder.id
    _update_remote_children(remote_folder, self.children)
def begin_span(self, name, span_type, context=None, leaf=False, tags=None):
    """Begin a new span

    :param name: name of the span
    :param span_type: type of the span
    :param context: a context dict
    :param leaf: True if this is a leaf span
    :param tags: a flat string/string dict of tags
    :return: the Span object
    """
    return self._begin_span(name, span_type, context=context, leaf=leaf,
                            tags=tags, parent_span_id=None)
def do_create(marfile, files, compress, productversion=None, channel=None,
              signing_key=None, signing_algorithm=None):
    """Create a new MAR file."""
    with open(marfile, 'w+b') as f:
        with MarWriter(f, productversion=productversion, channel=channel,
                       signing_key=signing_key,
                       signing_algorithm=signing_algorithm,
                       ) as m:
            # Use a distinct loop variable so the open file handle `f` is not
            # shadowed while the writer is still using it.
            for path in files:
                m.add(path, compress=compress)
def evaluate_service_changes(services, envs, repo_root, func):
    """Given a dict of services, and a list of environments, apply the diff
    function to evaluate the differences between the target environments
    and the rendered templates.

    Sub-services (names with '.' in them) are skipped.
    """
    for service_name, service in services.iteritems():
        for env_category in service['environments']:
            if env_category not in get_env_categories(envs):
                logger.debug(
                    'Skipping not-included environment `%s` for service `%s`',
                    env_category, service_name)
                continue
            environment = generate_test_environment_name(env_category)
            cf_client = get_cloudformation_client(service_name, environment)
            func(service_name, service, environment, cf_client, repo_root)
def compile(self, node, *args, **kwargs):
    """Parse a WhereNode to an LDAP filter string."""
    if isinstance(node, WhereNode):
        return where_node_as_ldap(node, self, self.connection)
    return super(SQLCompiler, self).compile(node, *args, **kwargs)
def parse_on_event(self, node):
    """Parses <OnEvent>

    @param node: Node containing the <OnEvent> element
    @type node: xml.etree.Element
    """
    try:
        port = node.lattrib['port']
    except KeyError:
        self.raise_error('<OnEvent> must specify a port.')

    event_handler = OnEvent(port)
    self.current_regime.add_event_handler(event_handler)
    self.current_event_handler = event_handler

    self.process_nested_tags(node)
    self.current_event_handler = None
def _action_remove(self, ids):
    """Remove IDs from the group

    Parameters
    ----------
    ids : {list, set, tuple, generator} of str
        The IDs to remove

    Returns
    -------
    list of dict
        The details of the removed jobs
    """
    return self._action_get((self.unlisten_to_node(id_) for id_ in ids))
def get_qr(self, filename=None):
    """Get pairing QR code from client"""
    if "Click to reload QR code" in self.driver.page_source:
        self.reload_qr()
    qr = self.driver.find_element_by_css_selector(self._SELECTORS['qrCode'])
    if filename is None:
        fd, fn_png = tempfile.mkstemp(prefix=self.username, suffix='.png')
    else:
        fd = os.open(filename, os.O_RDWR | os.O_CREAT)
        fn_png = os.path.abspath(filename)
    self.logger.debug("QRcode image saved at %s" % fn_png)
    qr.screenshot(fn_png)
    os.close(fd)
    return fn_png
def send_email_message(self, recipient, subject, html_message, text_message,
                       sender_email, sender_name):
    """Send email message via Flask-Mail.

    Args:
        recipient: Email address or tuple of (Name, Email-address).
        subject: Subject line.
        html_message: The message body in HTML.
        text_message: The message body in plain text.
    """
    # Construct sender from sender_name and sender_email
    sender = '"%s" <%s>' % (sender_name, sender_email) if sender_name else sender_email

    # Send email via SMTP except when we're testing
    if not current_app.testing:  # pragma: no cover
        try:
            # Prepare email message
            from flask_mail import Message
            message = Message(
                subject,
                sender=sender,
                recipients=[recipient],
                html=html_message,
                body=text_message)

            # Send email message
            self.mail.send(message)

        # Print helpful error messages on exceptions
        except (socket.gaierror, socket.error) as e:
            raise EmailError(
                'SMTP Connection error: Check your MAIL_SERVER and MAIL_PORT settings.')
        except smtplib.SMTPAuthenticationError:
            raise EmailError(
                'SMTP Authentication error: Check your MAIL_USERNAME and MAIL_PASSWORD settings.')
def pack(self, value=None):
    """Pack the struct in a binary representation.

    Merge some fields to ensure correct packing.

    Returns:
        bytes: Binary representation of this instance.
    """
    # Set the correct IHL based on options size
    if self.options:
        self.ihl += int(len(self.options) / 4)

    # Set the correct packet length based on header length and data
    self.length = int(self.ihl * 4 + len(self.data))

    self._version_ihl = self.version << 4 | self.ihl
    self._dscp_ecn = self.dscp << 2 | self.ecn
    self._flags_offset = self.flags << 13 | self.offset

    # Set the checksum field before packing
    self._update_checksum()

    return super().pack()
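A standalone illustration of the bit-field merging done above for the IPv4 version/IHL byte (the values are just the common defaults):

    version, ihl = 4, 5                  # IPv4, 20-byte header (5 * 4 bytes)
    version_ihl = version << 4 | ihl     # high nibble = version, low nibble = IHL
    print(hex(version_ihl))              # 0x45, the first byte of most IPv4 headers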
def DbPutClassProperty(self, argin):
    """Create / Update class property(ies)

    :param argin: Str[0] = Tango class name
                  Str[1] = Property number
                  Str[2] = Property name
                  Str[3] = Property value number
                  Str[4] = Property value 1
                  Str[n] = Property value n
    :type: tango.DevVarStringArray
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbPutClassProperty()")
    class_name = argin[0]
    nb_properties = int(argin[1])
    self.db.put_class_property(class_name, nb_properties, argin[2:])
def right_click(self, x, y, n=1, pre_dl=None, post_dl=None):
    """Right click at ``(x, y)`` on screen ``n`` times, with an optional
    delay at the beginning (``pre_dl``) and at the end (``post_dl``).
    """
    self.delay(pre_dl)
    self.m.click(x, y, 2, n)
    self.delay(post_dl)
async def exit_rescue_mode(self, wait: bool = False, wait_interval: int = 5):
    """Exit rescue mode.

    :param wait: If specified, wait until the deploy is complete.
    :param wait_interval: How often to poll, defaults to 5 seconds
    """
    try:
        self._data = await self._handler.exit_rescue_mode(
            system_id=self.system_id)
    except CallError as error:
        if error.status == HTTPStatus.FORBIDDEN:
            message = "Not allowed to exit rescue mode."
            raise OperationNotAllowed(message) from error
        else:
            raise

    if not wait:
        return self
    else:
        # Wait for machine to finish exiting rescue mode
        while self.status == NodeStatus.EXITING_RESCUE_MODE:
            await asyncio.sleep(wait_interval)
            self._data = await self._handler.read(system_id=self.system_id)
        if self.status == NodeStatus.FAILED_EXITING_RESCUE_MODE:
            msg = "{hostname} failed to exit rescue mode.".format(
                hostname=self.hostname)
            raise RescueModeFailure(msg, self)
        return self
def get_cands_uri(field, ccd, version='p', ext='measure3.cands.astrom',
                  prefix=None, block=None):
    """Return the nominal URI for a candidate file.

    @param field: the OSSOS field name
    @param ccd: which CCD are the candidates on
    @param version: either the 'p', or 's' (scrambled) candidates.
    @param ext: Perhaps we'll change this one day.
    @param prefix: if this is a 'fake' dataset then add 'fk'
    @param block: Which BLOCK of the field are we looking at? eg. 15BS+1+1
    @return:
    """
    if prefix is None:
        prefix = ""
    if len(prefix) > 0:
        prefix += "_"
    if len(field) > 0:
        field += "_"
    if ext is None:
        ext = ""
    if len(ext) > 0 and ext[0] != ".":
        ext = ".{}".format(ext)
    measure3_dir = MEASURE3
    if block is not None:
        measure3_dir += "/{}".format(block)
    return "{}/{}{}{}{}{}".format(measure3_dir, prefix, field, version,
                                  ccd, ext)
def accept(self):
    """accept() -> (socket object, address info)

    Wait for an incoming connection. Return a new socket representing the
    connection, and the address of the client. For IP sockets, the address
    info is a pair (hostaddr, port).
    """
    fd, addr = self._accept()
    sock = socket(self.family, self.type, self.proto, fileno=fd)
    # Issue #7995: if no default timeout is set and the listening
    # socket had a (non-zero) timeout, force the new socket in blocking
    # mode to override platform-specific socket flags inheritance.
    if getdefaulttimeout() is None and self.gettimeout():
        sock.setblocking(True)
    return sock, addr
def version(self):
    """Returns the node's RPC version

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.version()
    {
        "rpc_version": 1,
        "store_version": 10,
        "node_vendor": "RaiBlocks 9.0"
    }
    """
    resp = self.call('version')

    for key in ('rpc_version', 'store_version'):
        resp[key] = int(resp[key])

    return resp
def _find_relations(self):
    """Find all relevant relation elements and return them in a list."""
    # Get all extractions
    extractions = \
        list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]"))

    # Get relations from extractions
    relations = []
    for e in extractions:
        label_set = set(e.get('labels', []))
        # If this is a DirectedRelation
        if 'DirectedRelation' in label_set:
            self.relation_dict[e['@id']] = e
            subtype = e.get('subtype')
            if any(t in subtype for t in polarities.keys()):
                relations.append((subtype, e))
        # If this is an Event or an Entity
        if {'Event', 'Entity'} & label_set:
            self.concept_dict[e['@id']] = e

    if not relations and not self.relation_dict:
        logger.info("No relations found.")
    else:
        logger.info('%d relations of types %s found'
                    % (len(relations), ', '.join(polarities.keys())))
        logger.info('%d relations in dict.' % len(self.relation_dict))
        logger.info('%d concepts found.' % len(self.concept_dict))
    return relations
def read_packet(self, size=MTU):
    """Return a single packet read from the file as:

        bytes,
        (sec,       # timestamp seconds
         usec,      # timestamp microseconds
         wirelen)   # actual length of packet

    Returns None when no more packets are available.
    """
    hdr = self.f.read(16)
    if len(hdr) < 16:
        return None
    sec, usec, caplen, wirelen = struct.unpack(self.endian + "IIII", hdr)
    s = self.f.read(caplen)[:MTU]
    return s, 0, (sec, usec, wirelen)
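Each pcap record header is 16 bytes: four 32-bit unsigned integers. A standalone sketch of the unpacking, here with a fixed little-endian `<` prefix (the real reader picks the endianness from the file's magic number):

    import struct

    # ts_sec, ts_usec, captured length, original (wire) length
    hdr = struct.pack("<IIII", 1700000000, 123456, 60, 60)
    sec, usec, caplen, wirelen = struct.unpack("<IIII", hdr)
    print(sec, usec, caplen, wirelen)  # 1700000000 123456 60 60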
def IntraField(config={}):
    """Intra field interlace to sequential converter.

    This uses a vertical filter with an aperture of 8 lines, generated by
    :py:class:`~pyctools.components.interp.filtergenerator.FilterGenerator`.
    The aperture (and other parameters) can be adjusted after the
    :py:class:`IntraField` component is created.
    """
    return Compound(
        config=config,
        deint=SimpleDeinterlace(),
        interp=Resize(),
        filgen=FilterGenerator(yaperture=8, ycut=50),
        gain=Arithmetic(func='data * pt_float(2)'),
        linkages={
            ('self', 'input'): [('deint', 'input')],
            ('deint', 'output'): [('interp', 'input')],
            ('interp', 'output'): [('self', 'output')],
            ('filgen', 'output'): [('gain', 'input')],
            ('gain', 'output'): [('interp', 'filter')],
        }
    )
def copy_root_log_to_file(filename: str,
                          fmt: str = LOG_FORMAT,
                          datefmt: str = LOG_DATEFMT) -> None:
    """Copy all currently configured logs to the specified file.

    Should ONLY be called from the ``if __name__ == '__main__'`` script;
    see https://docs.python.org/3.4/howto/logging.html#library-config.
    """
    fh = logging.FileHandler(filename)  # default file mode is 'a' for append
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    fh.setFormatter(formatter)
    apply_handler_to_root_log(fh)
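Typical usage from a top-level script might look like the following (the log path is illustrative only):

    if __name__ == '__main__':
        logging.basicConfig(level=logging.INFO)
        copy_root_log_to_file('/tmp/myscript.log')  # hypothetical log path
        logging.getLogger(__name__).info("now logged to console and file")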
def delete_forwarding_address(payment_id, coin_symbol='btc', api_key=None):
    '''
    Delete a forwarding address on a specific blockchain, using its
    payment id.
    '''
    assert payment_id, 'payment_id required'
    assert is_valid_coin_symbol(coin_symbol)
    assert api_key, 'api_key required'

    params = {'token': api_key}
    url = make_url(**dict(payments=payment_id))
    r = requests.delete(url, params=params, verify=True,
                        timeout=TIMEOUT_IN_SECONDS)

    return get_valid_json(r, allow_204=True)
def _move_bee(self, bee, new_values):
    '''Moves a bee to a new position if the new fitness score is better
    than the bee's current fitness score

    Args:
        bee (EmployerBee): bee to move
        new_values (tuple): (new score, new values, new fitness function
            return value)
    '''
    score = np.nan_to_num(new_values[0])
    if bee.score > score:
        bee.failed_trials += 1
    else:
        bee.values = new_values[1]
        bee.score = score
        bee.error = new_values[2]
        bee.failed_trials = 0
        self._logger.log('debug', 'Bee assigned to new merged position')
def plot_eval_results(eval_results, metric=None, xaxislabel=None, yaxislabel=None,
                      title=None, title_fontsize='x-large', axes_title_fontsize='large',
                      show_metric_direction=True, metric_direction_font_size='large',
                      subplots_opts=None, subplots_adjust_opts=None, figsize='auto',
                      **fig_kwargs):
    """Plot the evaluation results from `eval_results`.

    `eval_results` must be a sequence containing `(param, values)` tuples,
    where `param` is the parameter value to appear on the x axis and
    `values` can be a dict structure containing the metric values.
    `eval_results` can be created using the `results_by_parameter` function
    from the `topicmod.common` module.

    Set `metric` to plot only a specific metric.
    Set `xaxislabel` for a label on the x-axis.
    Set `yaxislabel` for a label on the y-axis.
    Set `title` for a plot title.

    Options in a dict `subplots_opts` will be passed to `plt.subplots(...)`.
    Options in a dict `subplots_adjust_opts` will be passed to
    `fig.subplots_adjust(...)`.

    `figsize` can be set to a tuple `(width, height)` or to `"auto"`
    (default) which will set the size to `(8, 2 * <num. of metrics>)`.
    """
    if type(eval_results) not in (list, tuple) or not eval_results:
        raise ValueError('`eval_results` must be a list or tuple with at least one element')

    if type(eval_results[0]) not in (list, tuple) or len(eval_results[0]) != 2:
        raise ValueError('`eval_results` must be a list or tuple containing a (param, values) tuple. '
                         'Maybe `eval_results` must be converted with `results_by_parameter`.')

    if metric is not None and type(metric) not in (list, tuple):
        metric = [metric]
    elif metric is None:
        # remove special evaluation result 'model': the calculated model itself
        metric = list(set(next(iter(eval_results))[1].keys()) - {'model'})

    metric = sorted(metric)

    metric_direction = []
    for m in metric:
        if m == 'perplexity':
            metric_direction.append('minimize')
        else:
            m_fn_name = 'metric_%s' % (m[:16] if m.startswith('coherence_gensim') else m)
            m_fn = getattr(evaluate, m_fn_name, None)
            if m_fn:
                metric_direction.append(getattr(m_fn, 'direction', 'unknown'))
            else:
                metric_direction.append('unknown')

    n_metrics = len(metric)

    assert n_metrics == len(metric_direction)

    metrics_ordered = []
    for m_dir in sorted(set(metric_direction), reverse=True):
        metrics_ordered.extend([(m, d) for m, d in zip(metric, metric_direction)
                                if d == m_dir])

    assert n_metrics == len(metrics_ordered)

    # get figure and subplots (axes)
    if figsize == 'auto':
        figsize = (8, 2 * n_metrics)

    subplots_kwargs = dict(nrows=n_metrics, ncols=1, sharex=True,
                           constrained_layout=True, figsize=figsize)
    subplots_kwargs.update(subplots_opts or {})
    subplots_kwargs.update(fig_kwargs)

    fig, axes = plt.subplots(**subplots_kwargs)

    # set title
    if title:
        fig.suptitle(title, fontsize=title_fontsize)

    x = list(zip(*eval_results))[0]

    # set adjustments
    if title:
        subplots_adjust_kwargs = dict(top=0.9, hspace=0.3)
    else:
        subplots_adjust_kwargs = {}

    subplots_adjust_kwargs.update(subplots_adjust_opts or {})

    if subplots_adjust_kwargs:
        fig.subplots_adjust(**subplots_adjust_kwargs)

    # draw subplot for each metric
    axes_pos_per_dir = defaultdict(list)
    for i, (ax, (m, m_dir)) in enumerate(zip(axes.flatten(), metrics_ordered)):
        if show_metric_direction:
            axes_pos_per_dir[m_dir].append(ax.get_position())

        y = [metric_res[m] for _, metric_res in eval_results]
        ax.plot(x, y, label=m)
        ax.set_title(m, fontsize=axes_title_fontsize)

        # set axis labels
        if xaxislabel and i == len(metric) - 1:
            ax.set_xlabel(xaxislabel)
        if yaxislabel:
            ax.set_ylabel(yaxislabel)

    # show grouped metric direction on the left
    if axes_pos_per_dir:   # = if show_metric_direction
        left_xs = []
        ys = []
        for m_dir, bboxes in axes_pos_per_dir.items():
            left_xs.append(min(bb.x0 for bb in bboxes))
            min_y = min(bb.y0 for bb in bboxes)
            max_y = max(bb.y1 for bb in bboxes)
            ys.append((min_y, max_y))

        left_x = min(left_xs) / 2.5

        fig.lines = []
        for (min_y, max_y), m_dir in zip(ys, axes_pos_per_dir.keys()):
            center_y = min_y + (max_y - min_y) / 2

            fig.lines.append(Line2D((left_x, left_x), (min_y, max_y),
                                    transform=fig.transFigure,
                                    linewidth=5, color='lightgray'))
            fig.text(left_x / 1.5, center_y, m_dir,
                     fontsize=metric_direction_font_size,
                     rotation='vertical',
                     horizontalalignment='right',
                     verticalalignment='center')

    return fig, axes
def _get_call_names_helper(node):
    """Recursively finds all function names."""
    if isinstance(node, ast.Name):
        if node.id not in BLACK_LISTED_CALL_NAMES:
            yield node.id
    elif isinstance(node, ast.Subscript):
        yield from _get_call_names_helper(node.value)
    elif isinstance(node, ast.Str):
        yield node.s
    elif isinstance(node, ast.Attribute):
        yield node.attr
        yield from _get_call_names_helper(node.value)
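Assuming the helper above is importable (with `BLACK_LISTED_CALL_NAMES` defined in the same module), it walks an attribute chain from right to left; for example:

    import ast

    call = ast.parse("flask.request.args.get").body[0].value
    print(list(_get_call_names_helper(call)))
    # ['get', 'args', 'request', 'flask']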
def specimens_results_magic ( infile = 'pmag_specimens.txt' , measfile = 'magic_measurements.txt' , sampfile = 'er_samples.txt' , sitefile = 'er_sites.txt' , agefile = 'er_ages.txt' , specout = 'er_specimens.txt' , sampout = 'pmag_samples.txt' , siteout = 'pmag_sites.txt' , resout = 'pmag_results.txt' , critout = 'pmag_criteria.txt' , instout = 'magic_instruments.txt' , plotsites = False , fmt = 'svg' , dir_path = '.' , cors = [ ] , priorities = [ 'DA-AC-ARM' , 'DA-AC-TRM' ] , coord = 'g' , user = '' , vgps_level = 'site' , do_site_intensity = True , DefaultAge = [ "none" ] , avg_directions_by_sample = False , avg_intensities_by_sample = False , avg_all_components = False , avg_by_polarity = False , skip_directions = False , skip_intensities = False , use_sample_latitude = False , use_paleolatitude = False , use_criteria = 'default' ) : """Writes magic _ instruments , er _ specimens , pmag _ samples , pmag _ sites , pmag _ criteria , and pmag _ results . The data used to write this is obtained by reading a pmag _ speciemns , a magic _ measurements , a er _ samples , a er _ sites , a er _ ages . @ param - > infile : path from the WD to the pmag speciemns table @ param - > measfile : path from the WD to the magic measurement file @ param - > sampfile : path from the WD to the er sample file @ param - > sitefile : path from the WD to the er sites data file @ param - > agefile : path from the WD to the er ages data file @ param - > specout : path from the WD to the place to write the er specimens data file @ param - > sampout : path from the WD to the place to write the pmag samples data file @ param - > siteout : path from the WD to the place to write the pmag sites data file @ param - > resout : path from the WD to the place to write the pmag results data file @ param - > critout : path from the WD to the place to write the pmag criteria file @ param - > instout : path from th WD to the place to write the magic instruments file @ param - > documentation incomplete if you know more about the purpose of the parameters in this function and it ' s side effects please extend and complete this string"""
# initialize some variables plotsites = False # cannot use draw _ figs from within ipmag Comps = [ ] # list of components version_num = pmag . get_version ( ) args = sys . argv model_lat_file = "" Dcrit , Icrit , nocrit = 0 , 0 , 0 corrections = [ ] nocorrection = [ 'DA-NL' , 'DA-AC' , 'DA-CR' ] # do some data adjustments for cor in cors : nocorrection . remove ( 'DA-' + cor ) corrections . append ( 'DA-' + cor ) priorities = [ p if p . startswith ( 'DA-AC-' ) else 'DA-AC-' + p for p in priorities ] # translate coord into coords if coord == 's' : coords = [ '-1' ] if coord == 'g' : coords = [ '0' ] if coord == 't' : coords = [ '100' ] if coord == 'b' : coords = [ '0' , '100' ] if vgps_level == 'sample' : vgps = 1 # save sample level VGPS / VADMs else : vgps = 0 # site level if do_site_intensity : nositeints = 0 else : nositeints = 1 # change these all to True / False instead of 1/0 if not skip_intensities : # set model lat and paleolatitude options if use_sample_latitude and use_paleolatitude : print ( "you should set a paleolatitude file OR use present day lat - not both" ) return False elif use_sample_latitude : get_model_lat = 1 elif use_paleolatitude : get_model_lat = 2 try : model_lat_file = dir_path + '/' + args [ ind + 1 ] get_model_lat = 2 mlat = open ( model_lat_file , 'r' ) ModelLats = [ ] for line in mlat . readlines ( ) : ModelLat = { } tmp = line . split ( ) ModelLat [ "er_site_name" ] = tmp [ 0 ] ModelLat [ "site_model_lat" ] = tmp [ 1 ] ModelLat [ "er_sample_name" ] = tmp [ 0 ] ModelLat [ "sample_lat" ] = tmp [ 1 ] ModelLats . append ( ModelLat ) mlat . close ( ) except : print ( "use_paleolatitude option requires a valid paleolatitude file" ) else : get_model_lat = 0 # skips VADM calculation entirely if plotsites and not skip_directions : # plot by site - set up plot window EQ = { } EQ [ 'eqarea' ] = 1 # define figure 1 as equal area projection pmagplotlib . plot_init ( EQ [ 'eqarea' ] , 5 , 5 ) # I don ' t know why this has to be here , but otherwise the first plot # never plots . . . pmagplotlib . plot_net ( EQ [ 'eqarea' ] ) pmagplotlib . draw_figs ( EQ ) infile = os . path . join ( dir_path , infile ) measfile = os . path . join ( dir_path , measfile ) instout = os . path . join ( dir_path , instout ) sampfile = os . path . join ( dir_path , sampfile ) sitefile = os . path . join ( dir_path , sitefile ) agefile = os . path . join ( dir_path , agefile ) specout = os . path . join ( dir_path , specout ) sampout = os . path . join ( dir_path , sampout ) siteout = os . path . join ( dir_path , siteout ) resout = os . path . join ( dir_path , resout ) critout = os . path . join ( dir_path , critout ) if use_criteria == 'none' : Dcrit , Icrit , nocrit = 1 , 1 , 1 # no selection criteria crit_data = pmag . default_criteria ( nocrit ) elif use_criteria == 'default' : crit_data = pmag . default_criteria ( nocrit ) # use default criteria elif use_criteria == 'existing' : crit_data , file_type = pmag . magic_read ( critout ) # use pmag _ criteria file print ( "Acceptance criteria read in from " , critout ) accept = { } for critrec in crit_data : for key in list ( critrec . keys ( ) ) : # need to migrate specimen _ dang to specimen _ int _ dang for intensity # data using old format if 'IE-SPEC' in list ( critrec . keys ( ) ) and 'specimen_dang' in list ( critrec . keys ( ) ) and 'specimen_int_dang' not in list ( critrec . 
keys ( ) ) : critrec [ 'specimen_int_dang' ] = critrec [ 'specimen_dang' ] del critrec [ 'specimen_dang' ] # need to get rid of ron shaars sample _ int _ sigma _ uT if 'sample_int_sigma_uT' in list ( critrec . keys ( ) ) : critrec [ 'sample_int_sigma' ] = '%10.3e' % ( eval ( critrec [ 'sample_int_sigma_uT' ] ) * 1e-6 ) if key not in list ( accept . keys ( ) ) and critrec [ key ] != '' : accept [ key ] = critrec [ key ] if use_criteria == 'default' : pmag . magic_write ( critout , [ accept ] , 'pmag_criteria' ) print ( "\n Pmag Criteria stored in " , critout , '\n' ) # now we ' re done slow dancing # read in site data - has the lats and lons SiteNFO , file_type = pmag . magic_read ( sitefile ) # read in sample data SampNFO , file_type = pmag . magic_read ( sampfile ) # find all the sites with height info . height_nfo = pmag . get_dictitem ( SiteNFO , 'site_height' , '' , 'F' ) if agefile : AgeNFO , file_type = pmag . magic_read ( agefile ) # read in the age information # read in specimen interpretations Data , file_type = pmag . magic_read ( infile ) # retrieve specimens with intensity data IntData = pmag . get_dictitem ( Data , 'specimen_int' , '' , 'F' ) comment , orient = "" , [ ] samples , sites = [ ] , [ ] for rec in Data : # run through the data filling in missing keys and finding all components , coordinates available # fill in missing fields , collect unique sample and site names if 'er_sample_name' not in list ( rec . keys ( ) ) : rec [ 'er_sample_name' ] = "" elif rec [ 'er_sample_name' ] not in samples : samples . append ( rec [ 'er_sample_name' ] ) if 'er_site_name' not in list ( rec . keys ( ) ) : rec [ 'er_site_name' ] = "" elif rec [ 'er_site_name' ] not in sites : sites . append ( rec [ 'er_site_name' ] ) if 'specimen_int' not in list ( rec . keys ( ) ) : rec [ 'specimen_int' ] = '' if 'specimen_comp_name' not in list ( rec . keys ( ) ) or rec [ 'specimen_comp_name' ] == "" : rec [ 'specimen_comp_name' ] = 'A' if rec [ 'specimen_comp_name' ] not in Comps : Comps . append ( rec [ 'specimen_comp_name' ] ) if "specimen_tilt_correction" not in list ( rec . keys ( ) ) : rec [ "specimen_tilt_correction" ] = "-1" # assume sample coordinates rec [ 'specimen_tilt_correction' ] = rec [ 'specimen_tilt_correction' ] . strip ( '\n' ) if rec [ "specimen_tilt_correction" ] not in orient : # collect available coordinate systems orient . append ( rec [ "specimen_tilt_correction" ] ) if "specimen_direction_type" not in list ( rec . keys ( ) ) : # assume direction is line - not plane rec [ "specimen_direction_type" ] = 'l' if "specimen_dec" not in list ( rec . keys ( ) ) : # if no declination , set direction type to blank rec [ "specimen_direction_type" ] = '' if "specimen_n" not in list ( rec . keys ( ) ) : rec [ "specimen_n" ] = '' # put in n if "specimen_alpha95" not in list ( rec . keys ( ) ) : rec [ "specimen_alpha95" ] = '' # put in alpha95 if "magic_method_codes" not in list ( rec . keys ( ) ) : rec [ "magic_method_codes" ] = '' # start parsing data into SpecDirs , SpecPlanes , SpecInts SpecInts , SpecDirs , SpecPlanes = [ ] , [ ] , [ ] samples . sort ( ) # get sorted list of samples and sites sites . sort ( ) if not skip_intensities : # don ' t skip intensities # retrieve specimens with intensity data IntData = pmag . get_dictitem ( Data , 'specimen_int' , '' , 'F' ) if nocrit == 0 : # use selection criteria for rec in IntData : # do selection criteria kill = pmag . 
grade ( rec , accept , 'specimen_int' ) if len ( kill ) == 0 : # intensity record to be included in sample , site # calculations SpecInts . append ( rec ) else : SpecInts = IntData [ : ] # take everything - no selection criteria # check for required data adjustments if len ( corrections ) > 0 and len ( SpecInts ) > 0 : for cor in corrections : # only take specimens with the required corrections SpecInts = pmag . get_dictitem ( SpecInts , 'magic_method_codes' , cor , 'has' ) if len ( nocorrection ) > 0 and len ( SpecInts ) > 0 : for cor in nocorrection : # exclude the corrections not specified for inclusion SpecInts = pmag . get_dictitem ( SpecInts , 'magic_method_codes' , cor , 'not' ) # take top priority specimen of its name in remaining specimens ( only one # per customer ) PrioritySpecInts = [ ] specimens = pmag . get_specs ( SpecInts ) # get list of uniq specimen names for spec in specimens : # all the records for this specimen ThisSpecRecs = pmag . get_dictitem ( SpecInts , 'er_specimen_name' , spec , 'T' ) if len ( ThisSpecRecs ) == 1 : PrioritySpecInts . append ( ThisSpecRecs [ 0 ] ) elif len ( ThisSpecRecs ) > 1 : # more than one prec = [ ] for p in priorities : # all the records for this specimen ThisSpecRecs = pmag . get_dictitem ( SpecInts , 'magic_method_codes' , p , 'has' ) if len ( ThisSpecRecs ) > 0 : prec . append ( ThisSpecRecs [ 0 ] ) PrioritySpecInts . append ( prec [ 0 ] ) # take the best one SpecInts = PrioritySpecInts # this has the first specimen record if not skip_directions : # don ' t skip directions # retrieve specimens with directed lines and planes AllDirs = pmag . get_dictitem ( Data , 'specimen_direction_type' , '' , 'F' ) # get all specimens with specimen _ n information Ns = pmag . get_dictitem ( AllDirs , 'specimen_n' , '' , 'F' ) if nocrit != 1 : # use selection criteria for rec in Ns : # look through everything with specimen _ n for " good " data kill = pmag . grade ( rec , accept , 'specimen_dir' ) if len ( kill ) == 0 : # nothing killed it SpecDirs . append ( rec ) else : # no criteria SpecDirs = AllDirs [ : ] # take them all # SpecDirs is now the list of all specimen directions ( lines and planes ) # that pass muster # list of all sample data and list of those that pass the DE - SAMP criteria PmagSamps , SampDirs = [ ] , [ ] PmagSites , PmagResults = [ ] , [ ] # list of all site data and selected results SampInts = [ ] for samp in samples : # run through the sample names if avg_directions_by_sample : # average by sample if desired # get all the directional data for this sample SampDir = pmag . get_dictitem ( SpecDirs , 'er_sample_name' , samp , 'T' ) if len ( SampDir ) > 0 : # there are some directions for coord in coords : # step through desired coordinate systems # get all the directions for this sample CoordDir = pmag . get_dictitem ( SampDir , 'specimen_tilt_correction' , coord , 'T' ) if len ( CoordDir ) > 0 : # there are some with this coordinate system if not avg_all_components : # look component by component for comp in Comps : # get all directions from this component CompDir = pmag . get_dictitem ( CoordDir , 'specimen_comp_name' , comp , 'T' ) if len ( CompDir ) > 0 : # there are some # get a sample average from all specimens PmagSampRec = pmag . 
lnpbykey ( CompDir , 'sample' , 'specimen' ) # decorate the sample record PmagSampRec [ "er_location_name" ] = CompDir [ 0 ] [ 'er_location_name' ] PmagSampRec [ "er_site_name" ] = CompDir [ 0 ] [ 'er_site_name' ] PmagSampRec [ "er_sample_name" ] = samp PmagSampRec [ "er_citation_names" ] = "This study" PmagSampRec [ "er_analyst_mail_names" ] = user PmagSampRec [ 'magic_software_packages' ] = version_num if CompDir [ 0 ] [ 'specimen_flag' ] == 'g' : PmagSampRec [ 'sample_flag' ] = 'g' else : PmagSampRec [ 'sample_flag' ] = 'b' if nocrit != 1 : PmagSampRec [ 'pmag_criteria_codes' ] = "ACCEPT" if agefile != "" : PmagSampRec = pmag . get_age ( PmagSampRec , "er_site_name" , "sample_inferred_" , AgeNFO , DefaultAge ) site_height = pmag . get_dictitem ( height_nfo , 'er_site_name' , PmagSampRec [ 'er_site_name' ] , 'T' ) if len ( site_height ) > 0 : # add in height if available PmagSampRec [ "sample_height" ] = site_height [ 0 ] [ 'site_height' ] PmagSampRec [ 'sample_comp_name' ] = comp PmagSampRec [ 'sample_tilt_correction' ] = coord PmagSampRec [ 'er_specimen_names' ] = pmag . get_list ( CompDir , 'er_specimen_name' ) # get a list of the specimen names used PmagSampRec [ 'magic_method_codes' ] = pmag . get_list ( CompDir , 'magic_method_codes' ) # get a list of the methods used if nocrit != 1 : # apply selection criteria kill = pmag . grade ( PmagSampRec , accept , 'sample_dir' ) else : kill = [ ] if len ( kill ) == 0 : SampDirs . append ( PmagSampRec ) if vgps == 1 : # if sample level VGP info desired , do that now PmagResRec = pmag . getsampVGP ( PmagSampRec , SiteNFO ) if PmagResRec != "" : PmagResults . append ( PmagResRec ) # print ( PmagSampRec ) PmagSamps . append ( PmagSampRec ) if avg_all_components : # average all components together basically same as above PmagSampRec = pmag . lnpbykey ( CoordDir , 'sample' , 'specimen' ) PmagSampRec [ "er_location_name" ] = CoordDir [ 0 ] [ 'er_location_name' ] PmagSampRec [ "er_site_name" ] = CoordDir [ 0 ] [ 'er_site_name' ] PmagSampRec [ "er_sample_name" ] = samp PmagSampRec [ "er_citation_names" ] = "This study" PmagSampRec [ "er_analyst_mail_names" ] = user PmagSampRec [ 'magic_software_packages' ] = version_num if all ( i [ 'specimen_flag' ] == 'g' for i in CoordDir ) : PmagSampRec [ 'sample_flag' ] = 'g' else : PmagSampRec [ 'sample_flag' ] = 'b' if nocrit != 1 : PmagSampRec [ 'pmag_criteria_codes' ] = "" if agefile != "" : PmagSampRec = pmag . get_age ( PmagSampRec , "er_site_name" , "sample_inferred_" , AgeNFO , DefaultAge ) site_height = pmag . get_dictitem ( height_nfo , 'er_site_name' , PmagSampRec [ 'er_site_name' ] , 'T' ) if len ( site_height ) > 0 : # add in height if available PmagSampRec [ "sample_height" ] = site_height [ 0 ] [ 'site_height' ] PmagSampRec [ 'sample_tilt_correction' ] = coord PmagSampRec [ 'sample_comp_name' ] = pmag . get_list ( CoordDir , 'specimen_comp_name' ) # get components used PmagSampRec [ 'er_specimen_names' ] = pmag . get_list ( CoordDir , 'er_specimen_name' ) # get specimen names averaged PmagSampRec [ 'magic_method_codes' ] = pmag . get_list ( CoordDir , 'magic_method_codes' ) # assemble method codes if nocrit != 1 : # apply selection criteria kill = pmag . grade ( PmagSampRec , accept , 'sample_dir' ) if len ( kill ) == 0 : # passes muster SampDirs . append ( PmagSampRec ) if vgps == 1 : PmagResRec = pmag . getsampVGP ( PmagSampRec , SiteNFO ) if PmagResRec != "" : PmagResults . append ( PmagResRec ) else : # take everything SampDirs . append ( PmagSampRec ) if vgps == 1 : PmagResRec = pmag . 
getsampVGP ( PmagSampRec , SiteNFO ) if PmagResRec != "" : PmagResults . append ( PmagResRec ) PmagSamps . append ( PmagSampRec ) if avg_intensities_by_sample : # average by sample if desired # get all the intensity data for this sample SampI = pmag . get_dictitem ( SpecInts , 'er_sample_name' , samp , 'T' ) if len ( SampI ) > 0 : # there are some # get average intensity stuff PmagSampRec = pmag . average_int ( SampI , 'specimen' , 'sample' ) # decorate sample record PmagSampRec [ "sample_description" ] = "sample intensity" PmagSampRec [ "sample_direction_type" ] = "" PmagSampRec [ 'er_site_name' ] = SampI [ 0 ] [ "er_site_name" ] PmagSampRec [ 'er_sample_name' ] = samp PmagSampRec [ 'er_location_name' ] = SampI [ 0 ] [ "er_location_name" ] PmagSampRec [ "er_citation_names" ] = "This study" PmagSampRec [ "er_analyst_mail_names" ] = user if agefile != "" : PmagSampRec = pmag . get_age ( PmagSampRec , "er_site_name" , "sample_inferred_" , AgeNFO , DefaultAge ) site_height = pmag . get_dictitem ( height_nfo , 'er_site_name' , PmagSampRec [ 'er_site_name' ] , 'T' ) if len ( site_height ) > 0 : # add in height if available PmagSampRec [ "sample_height" ] = site_height [ 0 ] [ 'site_height' ] PmagSampRec [ 'er_specimen_names' ] = pmag . get_list ( SampI , 'er_specimen_name' ) PmagSampRec [ 'magic_method_codes' ] = pmag . get_list ( SampI , 'magic_method_codes' ) if nocrit != 1 : # apply criteria ! kill = pmag . grade ( PmagSampRec , accept , 'sample_int' ) if len ( kill ) == 0 : PmagSampRec [ 'pmag_criteria_codes' ] = "ACCEPT" SampInts . append ( PmagSampRec ) PmagSamps . append ( PmagSampRec ) else : PmagSampRec = { } # sample rejected else : # no criteria SampInts . append ( PmagSampRec ) PmagSamps . append ( PmagSampRec ) PmagSampRec [ 'pmag_criteria_codes' ] = "" if vgps == 1 and get_model_lat != 0 and PmagSampRec != { } : if get_model_lat == 1 : # use sample latitude PmagResRec = pmag . getsampVDM ( PmagSampRec , SampNFO ) # get rid of the model lat key del ( PmagResRec [ 'model_lat' ] ) elif get_model_lat == 2 : # use model latitude PmagResRec = pmag . getsampVDM ( PmagSampRec , ModelLats ) if PmagResRec != { } : PmagResRec [ 'magic_method_codes' ] = PmagResRec [ 'magic_method_codes' ] + ":IE-MLAT" if PmagResRec != { } : PmagResRec [ 'er_specimen_names' ] = PmagSampRec [ 'er_specimen_names' ] PmagResRec [ 'er_sample_names' ] = PmagSampRec [ 'er_sample_name' ] PmagResRec [ 'pmag_criteria_codes' ] = 'ACCEPT' PmagResRec [ 'average_int_sigma_perc' ] = PmagSampRec [ 'sample_int_sigma_perc' ] PmagResRec [ 'average_int_sigma' ] = PmagSampRec [ 'sample_int_sigma' ] PmagResRec [ 'average_int_n' ] = PmagSampRec [ 'sample_int_n' ] PmagResRec [ 'vadm_n' ] = PmagSampRec [ 'sample_int_n' ] PmagResRec [ 'data_type' ] = 'i' PmagResults . append ( PmagResRec ) if len ( PmagSamps ) > 0 : # fill in missing keys from different types of records TmpSamps , keylist = pmag . fillkeys ( PmagSamps ) # save in sample output file pmag . magic_write ( sampout , TmpSamps , 'pmag_samples' ) print ( ' sample averages written to ' , sampout ) # create site averages from specimens or samples as specified for site in sites : for coord in coords : if not avg_directions_by_sample : key , dirlist = 'specimen' , SpecDirs # if specimen averages at site level desired if avg_directions_by_sample : key , dirlist = 'sample' , SampDirs # if sample averages at site level desired # get all the sites with directions tmp = pmag . 
get_dictitem ( dirlist , 'er_site_name' , site , 'T' ) # use only the last coordinate if avg _ all _ components = = False tmp1 = pmag . get_dictitem ( tmp , key + '_tilt_correction' , coord , 'T' ) # fish out site information ( lat / lon , etc . ) sd = pmag . get_dictitem ( SiteNFO , 'er_site_name' , site , 'T' ) if len ( sd ) > 0 : sitedat = sd [ 0 ] if not avg_all_components : # do component wise averaging for comp in Comps : # get all components comp siteD = pmag . get_dictitem ( tmp1 , key + '_comp_name' , comp , 'T' ) # remove bad data from means quality_siteD = [ ] # remove any records for which specimen _ flag or sample _ flag are ' b ' # assume ' g ' if flag is not provided for rec in siteD : spec_quality = rec . get ( 'specimen_flag' , 'g' ) samp_quality = rec . get ( 'sample_flag' , 'g' ) if ( spec_quality == 'g' ) and ( samp_quality == 'g' ) : quality_siteD . append ( rec ) siteD = quality_siteD if len ( siteD ) > 0 : # there are some for this site and component name # get an average for this site PmagSiteRec = pmag . lnpbykey ( siteD , 'site' , key ) # decorate the site record PmagSiteRec [ 'site_comp_name' ] = comp PmagSiteRec [ "er_location_name" ] = siteD [ 0 ] [ 'er_location_name' ] PmagSiteRec [ "er_site_name" ] = siteD [ 0 ] [ 'er_site_name' ] PmagSiteRec [ 'site_tilt_correction' ] = coord PmagSiteRec [ 'site_comp_name' ] = pmag . get_list ( siteD , key + '_comp_name' ) if avg_directions_by_sample : PmagSiteRec [ 'er_sample_names' ] = pmag . get_list ( siteD , 'er_sample_name' ) else : PmagSiteRec [ 'er_specimen_names' ] = pmag . get_list ( siteD , 'er_specimen_name' ) # determine the demagnetization code ( DC3,4 or 5 ) for this site AFnum = len ( pmag . get_dictitem ( siteD , 'magic_method_codes' , 'LP-DIR-AF' , 'has' ) ) Tnum = len ( pmag . get_dictitem ( siteD , 'magic_method_codes' , 'LP-DIR-T' , 'has' ) ) DC = 3 if AFnum > 0 : DC += 1 if Tnum > 0 : DC += 1 PmagSiteRec [ 'magic_method_codes' ] = pmag . get_list ( siteD , 'magic_method_codes' ) + ':' + 'LP-DC' + str ( DC ) PmagSiteRec [ 'magic_method_codes' ] = PmagSiteRec [ 'magic_method_codes' ] . strip ( ":" ) if plotsites : print ( PmagSiteRec [ 'er_site_name' ] ) # plot and list the data pmagplotlib . plot_site ( EQ [ 'eqarea' ] , PmagSiteRec , siteD , key ) pmagplotlib . draw_figs ( EQ ) PmagSites . append ( PmagSiteRec ) else : # last component only # get the last orientation system specified siteD = tmp1 [ : ] if len ( siteD ) > 0 : # there are some # get the average for this site PmagSiteRec = pmag . lnpbykey ( siteD , 'site' , key ) # decorate the record PmagSiteRec [ "er_location_name" ] = siteD [ 0 ] [ 'er_location_name' ] PmagSiteRec [ "er_site_name" ] = siteD [ 0 ] [ 'er_site_name' ] PmagSiteRec [ 'site_comp_name' ] = comp PmagSiteRec [ 'site_tilt_correction' ] = coord PmagSiteRec [ 'site_comp_name' ] = pmag . get_list ( siteD , key + '_comp_name' ) PmagSiteRec [ 'er_specimen_names' ] = pmag . get_list ( siteD , 'er_specimen_name' ) PmagSiteRec [ 'er_sample_names' ] = pmag . get_list ( siteD , 'er_sample_name' ) AFnum = len ( pmag . get_dictitem ( siteD , 'magic_method_codes' , 'LP-DIR-AF' , 'has' ) ) Tnum = len ( pmag . get_dictitem ( siteD , 'magic_method_codes' , 'LP-DIR-T' , 'has' ) ) DC = 3 if AFnum > 0 : DC += 1 if Tnum > 0 : DC += 1 PmagSiteRec [ 'magic_method_codes' ] = pmag . get_list ( siteD , 'magic_method_codes' ) + ':' + 'LP-DC' + str ( DC ) PmagSiteRec [ 'magic_method_codes' ] = PmagSiteRec [ 'magic_method_codes' ] . strip ( ":" ) if not avg_directions_by_sample : PmagSiteRec [ 'site_comp_name' ] = pmag . 
get_list ( siteD , key + '_comp_name' ) if plotsites : pmagplotlib . plot_site ( EQ [ 'eqarea' ] , PmagSiteRec , siteD , key ) pmagplotlib . draw_figs ( EQ ) PmagSites . append ( PmagSiteRec ) else : print ( 'site information not found in er_sites for site, ' , site , ' site will be skipped' ) for PmagSiteRec in PmagSites : # now decorate each dictionary some more , and calculate VGPs etc . for results table PmagSiteRec [ "er_citation_names" ] = "This study" PmagSiteRec [ "er_analyst_mail_names" ] = user PmagSiteRec [ 'magic_software_packages' ] = version_num if agefile != "" : PmagSiteRec = pmag . get_age ( PmagSiteRec , "er_site_name" , "site_inferred_" , AgeNFO , DefaultAge ) PmagSiteRec [ 'pmag_criteria_codes' ] = 'ACCEPT' if 'site_n_lines' in list ( PmagSiteRec . keys ( ) ) and 'site_n_planes' in list ( PmagSiteRec . keys ( ) ) and PmagSiteRec [ 'site_n_lines' ] != "" and PmagSiteRec [ 'site_n_planes' ] != "" : if int ( PmagSiteRec [ "site_n_planes" ] ) > 0 : PmagSiteRec [ "magic_method_codes" ] = PmagSiteRec [ 'magic_method_codes' ] + ":DE-FM-LP" elif int ( PmagSiteRec [ "site_n_lines" ] ) > 2 : PmagSiteRec [ "magic_method_codes" ] = PmagSiteRec [ 'magic_method_codes' ] + ":DE-FM" kill = pmag . grade ( PmagSiteRec , accept , 'site_dir' ) if len ( kill ) == 0 : PmagResRec = { } # set up dictionary for the pmag _ results table entry PmagResRec [ 'data_type' ] = 'i' # decorate it a bit PmagResRec [ 'magic_software_packages' ] = version_num PmagSiteRec [ 'site_description' ] = 'Site direction included in results table' PmagResRec [ 'pmag_criteria_codes' ] = 'ACCEPT' dec = float ( PmagSiteRec [ "site_dec" ] ) inc = float ( PmagSiteRec [ "site_inc" ] ) if 'site_alpha95' in list ( PmagSiteRec . keys ( ) ) and PmagSiteRec [ 'site_alpha95' ] != "" : a95 = float ( PmagSiteRec [ "site_alpha95" ] ) else : a95 = 180. sitedat = pmag . get_dictitem ( SiteNFO , 'er_site_name' , PmagSiteRec [ 'er_site_name' ] , 'T' ) [ 0 ] # fish out site information ( lat / lon , etc . ) lat = float ( sitedat [ 'site_lat' ] ) lon = float ( sitedat [ 'site_lon' ] ) plon , plat , dp , dm = pmag . dia_vgp ( dec , inc , a95 , lat , lon ) # get the VGP for this site if PmagSiteRec [ 'site_tilt_correction' ] == '-1' : C = ' (spec coord) ' if PmagSiteRec [ 'site_tilt_correction' ] == '0' : C = ' (geog. coord) ' if PmagSiteRec [ 'site_tilt_correction' ] == '100' : C = ' (strat. 
coord) ' PmagResRec [ "pmag_result_name" ] = "VGP Site: " + PmagSiteRec [ "er_site_name" ] # decorate some more PmagResRec [ "result_description" ] = "Site VGP, coord system = " + str ( coord ) + ' component: ' + comp PmagResRec [ 'er_site_names' ] = PmagSiteRec [ 'er_site_name' ] PmagResRec [ 'pmag_criteria_codes' ] = 'ACCEPT' PmagResRec [ 'er_citation_names' ] = 'This study' PmagResRec [ 'er_analyst_mail_names' ] = user PmagResRec [ "er_location_names" ] = PmagSiteRec [ "er_location_name" ] if avg_directions_by_sample : PmagResRec [ "er_sample_names" ] = PmagSiteRec [ "er_sample_names" ] else : PmagResRec [ "er_specimen_names" ] = PmagSiteRec [ "er_specimen_names" ] PmagResRec [ "tilt_correction" ] = PmagSiteRec [ 'site_tilt_correction' ] PmagResRec [ "pole_comp_name" ] = PmagSiteRec [ 'site_comp_name' ] PmagResRec [ "average_dec" ] = PmagSiteRec [ "site_dec" ] PmagResRec [ "average_inc" ] = PmagSiteRec [ "site_inc" ] PmagResRec [ "average_alpha95" ] = PmagSiteRec [ "site_alpha95" ] PmagResRec [ "average_n" ] = PmagSiteRec [ "site_n" ] PmagResRec [ "average_n_lines" ] = PmagSiteRec [ "site_n_lines" ] PmagResRec [ "average_n_planes" ] = PmagSiteRec [ "site_n_planes" ] PmagResRec [ "vgp_n" ] = PmagSiteRec [ "site_n" ] PmagResRec [ "average_k" ] = PmagSiteRec [ "site_k" ] PmagResRec [ "average_r" ] = PmagSiteRec [ "site_r" ] PmagResRec [ "average_lat" ] = '%10.4f ' % ( lat ) PmagResRec [ "average_lon" ] = '%10.4f ' % ( lon ) if agefile != "" : PmagResRec = pmag . get_age ( PmagResRec , "er_site_names" , "average_" , AgeNFO , DefaultAge ) site_height = pmag . get_dictitem ( height_nfo , 'er_site_name' , site , 'T' ) if len ( site_height ) > 0 : PmagResRec [ "average_height" ] = site_height [ 0 ] [ 'site_height' ] PmagResRec [ "vgp_lat" ] = '%7.1f ' % ( plat ) PmagResRec [ "vgp_lon" ] = '%7.1f ' % ( plon ) PmagResRec [ "vgp_dp" ] = '%7.1f ' % ( dp ) PmagResRec [ "vgp_dm" ] = '%7.1f ' % ( dm ) PmagResRec [ "magic_method_codes" ] = PmagSiteRec [ "magic_method_codes" ] if '0' in PmagSiteRec [ 'site_tilt_correction' ] and "DA-DIR-GEO" not in PmagSiteRec [ 'magic_method_codes' ] : PmagSiteRec [ 'magic_method_codes' ] = PmagSiteRec [ 'magic_method_codes' ] + ":DA-DIR-GEO" if '100' in PmagSiteRec [ 'site_tilt_correction' ] and "DA-DIR-TILT" not in PmagSiteRec [ 'magic_method_codes' ] : PmagSiteRec [ 'magic_method_codes' ] = PmagSiteRec [ 'magic_method_codes' ] + ":DA-DIR-TILT" PmagSiteRec [ 'site_polarity' ] = "" if avg_by_polarity : # assign polarity based on angle of pole lat to spin axis - may want to re - think this sometime angle = pmag . angle ( [ 0 , 0 ] , [ 0 , ( 90 - plat ) ] ) if angle <= 55. : PmagSiteRec [ "site_polarity" ] = 'n' if angle > 55. and angle < 125. : PmagSiteRec [ "site_polarity" ] = 't' if angle >= 125. : PmagSiteRec [ "site_polarity" ] = 'r' PmagResults . append ( PmagResRec ) if avg_by_polarity : # find the tilt corrected data crecs = pmag . get_dictitem ( PmagSites , 'site_tilt_correction' , '100' , 'T' ) if len ( crecs ) < 2 : # if there aren ' t any , find the geographic corrected data crecs = pmag . get_dictitem ( PmagSites , 'site_tilt_correction' , '0' , 'T' ) if len ( crecs ) > 2 : # if there are some , comp = pmag . get_list ( crecs , 'site_comp_name' ) . split ( ':' ) [ 0 ] # find the first component # fish out all of the first component crecs = pmag . get_dictitem ( crecs , 'site_comp_name' , comp , 'T' ) precs = [ ] for rec in crecs : precs . 
append ( { 'dec' : rec [ 'site_dec' ] , 'inc' : rec [ 'site_inc' ] , 'name' : rec [ 'er_site_name' ] , 'loc' : rec [ 'er_location_name' ] } ) # calculate average by polarity polpars = pmag . fisher_by_pol ( precs ) # hunt through all the modes ( normal = A , reverse = B , all = ALL ) for mode in list ( polpars . keys ( ) ) : PolRes = { } PolRes [ 'er_citation_names' ] = 'This study' PolRes [ "pmag_result_name" ] = "Polarity Average: Polarity " + mode PolRes [ "data_type" ] = "a" PolRes [ "average_dec" ] = '%7.1f' % ( polpars [ mode ] [ 'dec' ] ) PolRes [ "average_inc" ] = '%7.1f' % ( polpars [ mode ] [ 'inc' ] ) PolRes [ "average_n" ] = '%i' % ( polpars [ mode ] [ 'n' ] ) PolRes [ "average_r" ] = '%5.4f' % ( polpars [ mode ] [ 'r' ] ) PolRes [ "average_k" ] = '%6.0f' % ( polpars [ mode ] [ 'k' ] ) PolRes [ "average_alpha95" ] = '%7.1f' % ( polpars [ mode ] [ 'alpha95' ] ) PolRes [ 'er_site_names' ] = polpars [ mode ] [ 'sites' ] PolRes [ 'er_location_names' ] = polpars [ mode ] [ 'locs' ] PolRes [ 'magic_software_packages' ] = version_num PmagResults . append ( PolRes ) if not skip_intensities and nositeints != 1 : for site in sites : # now do intensities for each site if plotsites : print ( site ) if not avg_intensities_by_sample : key , intlist = 'specimen' , SpecInts # if using specimen level data if avg_intensities_by_sample : key , intlist = 'sample' , PmagSamps # if using sample level data # get all the intensities for this site Ints = pmag . get_dictitem ( intlist , 'er_site_name' , site , 'T' ) if len ( Ints ) > 0 : # there are some # get average intensity stuff for site table PmagSiteRec = pmag . average_int ( Ints , key , 'site' ) # get average intensity stuff for results table PmagResRec = pmag . average_int ( Ints , key , 'average' ) if plotsites : # if site by site examination requested - print this site out to the screen for rec in Ints : print ( rec [ 'er_' + key + '_name' ] , ' %7.1f' % ( 1e6 * float ( rec [ key + '_int' ] ) ) ) if len ( Ints ) > 1 : print ( 'Average: ' , '%7.1f' % ( 1e6 * float ( PmagResRec [ 'average_int' ] ) ) , 'N: ' , len ( Ints ) ) print ( 'Sigma: ' , '%7.1f' % ( 1e6 * float ( PmagResRec [ 'average_int_sigma' ] ) ) , 'Sigma %: ' , PmagResRec [ 'average_int_sigma_perc' ] ) input ( 'Press any key to continue\n' ) er_location_name = Ints [ 0 ] [ "er_location_name" ] # decorate the records PmagSiteRec [ "er_location_name" ] = er_location_name PmagSiteRec [ "er_citation_names" ] = "This study" PmagResRec [ "er_location_names" ] = er_location_name PmagResRec [ "er_citation_names" ] = "This study" PmagSiteRec [ "er_analyst_mail_names" ] = user PmagResRec [ "er_analyst_mail_names" ] = user PmagResRec [ "data_type" ] = 'i' if not avg_intensities_by_sample : PmagSiteRec [ 'er_specimen_names' ] = pmag . get_list ( Ints , 'er_specimen_name' ) # list of all specimens used PmagResRec [ 'er_specimen_names' ] = pmag . get_list ( Ints , 'er_specimen_name' ) PmagSiteRec [ 'er_sample_names' ] = pmag . get_list ( Ints , 'er_sample_name' ) # list of all samples used PmagResRec [ 'er_sample_names' ] = pmag . get_list ( Ints , 'er_sample_name' ) PmagSiteRec [ 'er_site_name' ] = site PmagResRec [ 'er_site_names' ] = site PmagSiteRec [ 'magic_method_codes' ] = pmag . get_list ( Ints , 'magic_method_codes' ) PmagResRec [ 'magic_method_codes' ] = pmag . get_list ( Ints , 'magic_method_codes' ) kill = pmag . 
grade ( PmagSiteRec , accept , 'site_int' ) if nocrit == 1 or len ( kill ) == 0 : b , sig = float ( PmagResRec [ 'average_int' ] ) , "" if ( PmagResRec [ 'average_int_sigma' ] ) != "" : sig = float ( PmagResRec [ 'average_int_sigma' ] ) # fish out site direction sdir = pmag . get_dictitem ( PmagResults , 'er_site_names' , site , 'T' ) # get the VDM for this record using last average # inclination ( hope it is the right one ! ) if len ( sdir ) > 0 and sdir [ - 1 ] [ 'average_inc' ] != "" : inc = float ( sdir [ 0 ] [ 'average_inc' ] ) # get magnetic latitude using dipole formula mlat = pmag . magnetic_lat ( inc ) # get VDM with magnetic latitude PmagResRec [ "vdm" ] = '%8.3e ' % ( pmag . b_vdm ( b , mlat ) ) PmagResRec [ "vdm_n" ] = PmagResRec [ 'average_int_n' ] if 'average_int_sigma' in list ( PmagResRec . keys ( ) ) and PmagResRec [ 'average_int_sigma' ] != "" : vdm_sig = pmag . b_vdm ( float ( PmagResRec [ 'average_int_sigma' ] ) , mlat ) PmagResRec [ "vdm_sigma" ] = '%8.3e ' % ( vdm_sig ) else : PmagResRec [ "vdm_sigma" ] = "" mlat = "" # define a model latitude if get_model_lat == 1 : # use present site latitude mlats = pmag . get_dictitem ( SiteNFO , 'er_site_name' , site , 'T' ) if len ( mlats ) > 0 : mlat = mlats [ 0 ] [ 'site_lat' ] # use a model latitude from some plate reconstruction model # ( or something ) elif get_model_lat == 2 : mlats = pmag . get_dictitem ( ModelLats , 'er_site_name' , site , 'T' ) if len ( mlats ) > 0 : PmagResRec [ 'model_lat' ] = mlats [ 0 ] [ 'site_model_lat' ] mlat = PmagResRec [ 'model_lat' ] if mlat != "" : # get the VADM using the desired latitude PmagResRec [ "vadm" ] = '%8.3e ' % ( pmag . b_vdm ( b , float ( mlat ) ) ) if sig != "" : vdm_sig = pmag . b_vdm ( float ( PmagResRec [ 'average_int_sigma' ] ) , float ( mlat ) ) PmagResRec [ "vadm_sigma" ] = '%8.3e ' % ( vdm_sig ) PmagResRec [ "vadm_n" ] = PmagResRec [ 'average_int_n' ] else : PmagResRec [ "vadm_sigma" ] = "" # fish out site information ( lat / lon , etc . ) sitedat = pmag . get_dictitem ( SiteNFO , 'er_site_name' , PmagSiteRec [ 'er_site_name' ] , 'T' ) if len ( sitedat ) > 0 : sitedat = sitedat [ 0 ] PmagResRec [ 'average_lat' ] = sitedat [ 'site_lat' ] PmagResRec [ 'average_lon' ] = sitedat [ 'site_lon' ] else : PmagResRec [ 'average_lat' ] = 'UNKNOWN' PmagResRec [ 'average_lon' ] = 'UNKNOWN' PmagResRec [ 'magic_software_packages' ] = version_num PmagResRec [ "pmag_result_name" ] = "V[A]DM: Site " + site PmagResRec [ "result_description" ] = "V[A]DM of site" PmagResRec [ "pmag_criteria_codes" ] = "ACCEPT" if agefile != "" : PmagResRec = pmag . get_age ( PmagResRec , "er_site_names" , "average_" , AgeNFO , DefaultAge ) site_height = pmag . get_dictitem ( height_nfo , 'er_site_name' , site , 'T' ) if len ( site_height ) > 0 : PmagResRec [ "average_height" ] = site_height [ 0 ] [ 'site_height' ] PmagSites . append ( PmagSiteRec ) PmagResults . append ( PmagResRec ) if len ( PmagSites ) > 0 : Tmp , keylist = pmag . fillkeys ( PmagSites ) pmag . magic_write ( siteout , Tmp , 'pmag_sites' ) print ( ' sites written to ' , siteout ) else : print ( "No Site level table" ) if len ( PmagResults ) > 0 : TmpRes , keylist = pmag . fillkeys ( PmagResults ) pmag . magic_write ( resout , TmpRes , 'pmag_results' ) print ( ' results written to ' , resout ) else : print ( "No Results level table" )
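A hedged usage sketch for the routine above; the project directory is an assumption, and the default MagIC tables (pmag_specimens.txt, er_samples.txt, er_sites.txt, etc.) must already exist there for the call to do anything useful:

# Average directions by sample in geographic coordinates and write the
# pmag_samples / pmag_sites / pmag_results tables back into the same directory.
specimens_results_magic(
    dir_path='MyProject',            # assumed working directory with MagIC 2.5 files
    coord='g',
    avg_directions_by_sample=True,
    use_criteria='default',
)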
def _from_signer_and_info ( cls , signer , info , ** kwargs ) : """Creates a Credentials instance from a signer and service account info . Args : signer ( google . auth . crypt . Signer ) : The signer used to sign JWTs . info ( Mapping [ str , str ] ) : The service account info . kwargs : Additional arguments to pass to the constructor . Returns : google . auth . jwt . Credentials : The constructed credentials . Raises : ValueError : If the info is not in the expected format ."""
return cls ( signer , service_account_email = info [ 'client_email' ] , token_uri = info [ 'token_uri' ] , project_id = info . get ( 'project_id' ) , ** kwargs )
def handle_django_settings ( filename ) : '''Attempts to load a Django project and collect package dependencies from its settings . Tested with Django 1.4 and 1.8 ; other versions may have nuances that are not handled .'''
old_sys_path = sys . path [ : ] dirpath = os . path . dirname ( filename ) project = os . path . basename ( dirpath ) cwd = os . getcwd ( ) project_path = os . path . normpath ( os . path . join ( dirpath , '..' ) ) if project_path not in sys . path : sys . path . insert ( 0 , project_path ) os . chdir ( project_path ) project_settings = '{}.settings' . format ( project ) os . environ [ 'DJANGO_SETTINGS_MODULE' ] = project_settings try : import django # Sanity django . setup = lambda : False except ImportError : log . error ( 'Found Django settings, but Django is not installed.' ) return log . warn ( 'Loading Django Settings (Using {}): {}' . format ( django . get_version ( ) , filename ) ) from django . conf import LazySettings installed_apps = set ( ) settings_imports = set ( ) try : settings = LazySettings ( ) settings . _setup ( ) for k , v in vars ( settings . _wrapped ) . items ( ) : if k not in _excluded_settings and re . match ( r'^[A-Z_]+$' , k ) : # log . debug ( ' Scanning Django setting : % s ' , k ) scan_django_settings ( v , settings_imports ) # Manually scan INSTALLED _ APPS since the broad scan won ' t include # strings without a period in it . for app in getattr ( settings , 'INSTALLED_APPS' , [ ] ) : if hasattr ( app , '__file__' ) and getattr ( app , '__file__' ) : imp , _ = utils . import_path_from_file ( getattr ( app , '__file__' ) ) installed_apps . add ( imp ) else : installed_apps . add ( app ) except Exception as e : log . error ( 'Could not load Django settings: %s' , e ) log . debug ( '' , exc_info = True ) return if not installed_apps or not settings_imports : log . error ( 'Got empty settings values from Django settings.' ) try : from django . apps . registry import apps , Apps , AppRegistryNotReady # Django doesn ' t like it when the initial instance of ` apps ` is reused , # but it has to be populated before other instances can be created . if not apps . apps_ready : apps . populate ( installed_apps ) else : apps = Apps ( installed_apps ) start = time . time ( ) while True : try : for app in apps . get_app_configs ( ) : installed_apps . add ( app . name ) except AppRegistryNotReady : if time . time ( ) - start > 10 : raise Exception ( 'Bail out of waiting for Django' ) log . debug ( 'Waiting for apps to load...' ) continue break except Exception as e : log . debug ( 'Could not use AppConfig: {}' . format ( e ) ) # Restore before sub scans can occur sys . path [ : ] = old_sys_path os . chdir ( cwd ) for item in settings_imports : need_scan = item . startswith ( _filescan_modules ) yield ( 'django' , item , project_path if need_scan else None ) for app in installed_apps : need_scan = app . startswith ( project ) yield ( 'django' , app , project_path if need_scan else None )
def url_tibiadata ( self ) : """: class : ` str ` : The URL to the TibiaData . com page of the house ."""
return self . get_url_tibiadata ( self . id , self . world ) if self . id and self . world else None
def find_usedby ( self , depslock_file_path , property_validate = True ) : """Find all packages that depend on ( use ) the given package . : param depslock _ file _ path : path to the dependency lock file ; if None , the current package parameters are used : param property _ validate : for ` root ` packages the properties need to be validated ; for packages found via a ` lock ` file the validation step can be skipped : return :"""
if depslock_file_path is None : self . _raw = [ self . _params ] self . _raw [ 0 ] [ 'repo' ] = None self . _raw [ 0 ] [ 'server' ] = None else : self . _raw = [ x for x in self . _downloader . common_parser . iter_packages_params ( depslock_file_path ) ] self . packages = self . _downloader . get_usedby_packages ( { 'raw' : self . _raw } , property_validate = property_validate )
def rms ( signal , fs ) : """Returns the root mean square ( RMS ) of the given * signal * : param signal : a vector of electric potential : type signal : numpy . ndarray : param fs : samplerate of the signal ( Hz ) : type fs : int : returns : float - - the RMS value of the signal"""
# if a signal contains some silence , the RMS of the whole signal # will come out lower than that of a signal without a silent period . # I don ' t like this , so I am going to chunk the signal and # take the value of the most intense chunk chunk_time = 0.001 # 1 ms chunk chunk_samps = int ( chunk_time * fs ) amps = [ ] if chunk_samps > 10 : for i in range ( 0 , len ( signal ) - chunk_samps , chunk_samps ) : amps . append ( np . sqrt ( np . mean ( pow ( signal [ i : i + chunk_samps ] , 2 ) ) ) ) amps . append ( np . sqrt ( np . mean ( pow ( signal [ len ( signal ) - chunk_samps : ] , 2 ) ) ) ) return np . amax ( amps ) else : # samplerate low , just rms the whole thing return np . sqrt ( np . mean ( pow ( signal , 2 ) ) )
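A quick usage sketch for `rms` with a synthetic tone; at this samplerate a 1 ms chunk holds more than 10 samples, so the chunked branch is exercised:

import numpy as np

fs = 44100
t = np.arange(0, 0.5, 1.0 / fs)
signal = np.sin(2 * np.pi * 440 * t)   # 440 Hz tone, amplitude 1
print(rms(signal, fs))                 # RMS of the loudest 1 ms chunk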
def get_decorated_names ( path , decorator_module , decorator_name ) : '''Get the names of functions or methods decorated with the specified decorator . If a method , the name will be given as class _ name . method _ name . Args : path : The path to the module . decorator _ module : The name of the module defining the decorator . decorator _ name : The name of the decorator .'''
# Read the source with open ( path ) as f : module_source = f . read ( ) # use raw strings so the regex escapes are not interpreted as string escapes expression = r'\s*@(?:{}\.)?{}[\s\S]+?(?:instance_creator\s*=\s*([\w.]+)[\s\S]+?)?\s*def\s+(\w+)\s*\(\s*(self)?' . format ( decorator_module , decorator_name ) methods = re . compile ( expression ) . findall ( module_source ) decorateds = [ x [ 1 ] for x in methods ] instance_creators = [ x [ 0 ] if x [ 0 ] != '' else None for x in methods ] is_methods = [ x [ 2 ] != '' for x in methods ] result = [ ] class_name_expression = r'\s*(?:class\s+(\w+)\s*\(.*\:\n[\s\S]+?)+?\s+def\s+{}\s*\(' # Get class names for methods for i in range ( len ( decorateds ) ) : decorated = decorateds [ i ] # If a method if is_methods [ i ] : # Get the class name index = decorateds [ : i ] . count ( decorated ) class_name = re . compile ( class_name_expression . format ( decorated ) ) . findall ( module_source ) [ index ] original_name = decorated instance_creator = instance_creators [ i ] if decorated [ : 2 ] == '__' : decorated = '_' + class_name + decorated result . append ( { 'name' : decorated , 'class_name' : class_name , 'instance_creator' : instance_creator } ) else : result . append ( { 'name' : decorated } ) if DEBUG : print ( decorateds , instance_creators , is_methods , result ) return result
def del_password ( name , root = None ) : '''. . versionadded : : 2014.7.0 Delete the password from name user name User to delete root Directory to chroot into CLI Example : . . code - block : : bash salt ' * ' shadow . del _ password username'''
cmd = [ 'passwd' ] if root is not None : cmd . extend ( ( '-R' , root ) ) cmd . extend ( ( '-d' , name ) ) __salt__ [ 'cmd.run' ] ( cmd , python_shell = False , output_loglevel = 'quiet' ) uinfo = info ( name , root = root ) return not uinfo [ 'passwd' ] and uinfo [ 'name' ] == name
def scencd ( sc , sclkch , MXPART = None ) : """Encode character representation of spacecraft clock time into a double precision number . http : / / naif . jpl . nasa . gov / pub / naif / toolkit _ docs / C / cspice / scencd _ c . html : param sc : NAIF spacecraft identification code . : type sc : int : param sclkch : Character representation of a spacecraft clock . : type sclkch : str : param MXPART : Maximum number of spacecraft clock partitions . : type MXPART : int : return : Encoded representation of the clock count . : rtype : float"""
sc = ctypes . c_int ( sc ) sclkch = stypes . stringToCharP ( sclkch ) sclkdp = ctypes . c_double ( ) libspice . scencd_c ( sc , sclkch , ctypes . byref ( sclkdp ) ) return sclkdp . value
def init_db ( drop_all = False , bind = engine ) : """Initialize the database , optionally dropping existing tables ."""
try : if drop_all : Base . metadata . drop_all ( bind = bind ) Base . metadata . create_all ( bind = bind ) except OperationalError as err : msg = 'password authentication failed for user "dallinger"' if msg in str ( err ) : sys . stderr . write ( db_user_warning ) raise return session
def submit_error ( self , description , extra = None , default_message = None ) : """Send an error to bugzscout . Sends a request to the fogbugz URL for this instance . If a case exists with the * * same * * description , a new occurrence will be added to that case . It is advisable to remove personal info from the description for that reason . Account ids , emails , request ids , etc , will make the occurrence counting builtin to bugzscout less useful . Those values should go in the extra parameter , though , so the developer investigating the case has access to them . When extra is not specified , bugzscout will increase the number of occurrences for the case with the given description , but it will not include an entry for it ( unless it is a new case ) . : param description : string description for error : param extra : string details for error : param default _ message : string default message to return in responses"""
req_data = { 'ScoutUserName' : self . user , 'ScoutProject' : self . project , 'ScoutArea' : self . area , # When this matches , cases are grouped together . 'Description' : description , 'Extra' : extra , # 1 forces a new bug to be created . 'ForceNewBug' : 0 , 'ScoutDefaultMessage' : default_message , # 0 sends XML response , 1 sends HTML response . 'FriendlyResponse' : 0 , } LOG . debug ( 'Making bugzscout request to {0} with body {1}' . format ( self . url , req_data ) ) resp = requests . post ( self . url , data = req_data ) LOG . debug ( 'Response from bugzscout request: {0} body:\n{1}' . format ( resp , resp . content ) ) if resp . ok : LOG . info ( 'Successfully submitted error to bugzscout.' ) else : LOG . warn ( 'Failed to submit error to bugzscout: {0}' . format ( resp . reason ) )
def get_osid_object_mdata ( ) : """Return default mdata map for OsidObject"""
return { 'display_name' : { 'element_label' : { 'text' : 'Display Name' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } , 'instructions' : { 'text' : 'Required, 255 character maximum' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } , 'required' : False , 'read_only' : False , 'linked' : False , 'array' : False , 'default_string_values' : [ { 'text' : '' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } ] , 'syntax' : 'STRING' , 'minimum_string_length' : 0 , 'maximum_string_length' : 256 , 'string_set' : [ ] } , 'description' : { 'element_label' : { 'text' : 'Description' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } , 'instructions' : { 'text' : 'Optional' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } , 'required' : False , 'read_only' : False , 'linked' : False , 'array' : False , 'default_string_values' : [ { 'text' : '' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } ] , 'syntax' : 'STRING' , 'minimum_string_length' : 0 , 'maximum_string_length' : 1024 , 'string_set' : [ ] } , 'genus_type' : { 'element_label' : { 'text' : 'Genus Type' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } , 'instructions' : { 'text' : 'Required genus Type of type osid.type.Type' , 'languageTypeId' : str ( DEFAULT_LANGUAGE_TYPE ) , 'scriptTypeId' : str ( DEFAULT_SCRIPT_TYPE ) , 'formatTypeId' : str ( DEFAULT_FORMAT_TYPE ) , } , 'required' : True , 'value' : False , 'read_only' : False , 'linked' : False , 'array' : False , 'default_type_values' : [ str ( DEFAULT_GENUS_TYPE ) ] , 'syntax' : 'TYPE' , 'type_set' : [ ] } }
def _safe_exit ( start , output ) : """exit without breaking pipes"""
try : sys . stdout . write ( output ) sys . stdout . flush ( ) except TypeError : # python3 sys . stdout . write ( str ( output , 'utf-8' ) ) sys . stdout . flush ( ) except IOError : pass seconds = time . time ( ) - start print ( "\n\n%5.3f seconds" % ( seconds ) , file = sys . stderr )
def circ_corrcl ( x , y , tail = 'two-sided' ) : """Correlation coefficient between one circular and one linear random variable . Parameters x : np . array First circular variable ( expressed in radians ) y : np . array Second variable ( linear ) tail : string Specify whether to return ' one - sided ' or ' two - sided ' p - value . Returns r : float Correlation coefficient pval : float Uncorrected p - value Notes Python code borrowed from brainpipe ( based on the MATLAB toolbox CircStats ) Please note that NaN values are automatically removed from datasets . Examples Compute the r and p - value between one circular and one linear variable . > > > from pingouin import circ _ corrcl > > > x = [ 0.785 , 1.570 , 3.141 , 0.839 , 5.934] > > > y = [ 1.593 , 1.291 , - 0.248 , - 2.892 , 0.102] > > > r , pval = circ _ corrcl ( x , y ) > > > print ( r , pval ) 0.109 0.9708899750629236"""
from scipy . stats import pearsonr , chi2 x = np . asarray ( x ) y = np . asarray ( y ) # Check size if x . size != y . size : raise ValueError ( 'x and y must have the same length.' ) # Remove NA x , y = remove_na ( x , y , paired = True ) n = x . size # Compute correlation coefficient for sin and cos independently rxs = pearsonr ( y , np . sin ( x ) ) [ 0 ] rxc = pearsonr ( y , np . cos ( x ) ) [ 0 ] rcs = pearsonr ( np . sin ( x ) , np . cos ( x ) ) [ 0 ] # Compute angular - linear correlation ( equ . 27.47) r = np . sqrt ( ( rxc ** 2 + rxs ** 2 - 2 * rxc * rxs * rcs ) / ( 1 - rcs ** 2 ) ) # Compute p - value pval = chi2 . sf ( n * r ** 2 , 2 ) pval = pval / 2 if tail == 'one-sided' else pval return np . round ( r , 3 ) , pval
def default ( self , value ) : """Convert mongo . ObjectId ."""
if isinstance ( value , ObjectId ) : return str ( value ) return super ( ElasticJSONSerializer , self ) . default ( value )
def fprime ( self , w , * args ) : """Return the derivatives of the cost function for predictions . Args : w ( array of float ) : weight vectors such that : w [ : - h1 ] - - weights between the input and h layers w [ - h1 : ] - - weights between the h and output layers args : features ( args [ 0 ] ) and target ( args [ 1 ] ) Returns : gradients of the cost function for predictions"""
x0 = args [ 0 ] x1 = args [ 1 ] n0 = x0 . shape [ 0 ] n1 = x1 . shape [ 0 ] # n - - number of pairs to evaluate n = max ( n0 , n1 ) * 10 idx0 = np . random . choice ( range ( n0 ) , size = n ) idx1 = np . random . choice ( range ( n1 ) , size = n ) # b - - bias for the input and h layers b = np . ones ( ( n , 1 ) ) i1 = self . i + 1 h = self . h h1 = h + 1 w2 = w [ - h1 : ] . reshape ( h1 , 1 ) w1 = w [ : - h1 ] . reshape ( i1 , h ) if sparse . issparse ( x0 ) : x0 = x0 . tocsr ( ) [ idx0 ] x1 = x1 . tocsr ( ) [ idx1 ] xb0 = sparse . hstack ( ( x0 , b ) ) xb1 = sparse . hstack ( ( x1 , b ) ) else : x0 = x0 [ idx0 ] x1 = x1 [ idx1 ] xb0 = np . hstack ( ( x0 , b ) ) xb1 = np . hstack ( ( x1 , b ) ) z0 = np . hstack ( ( sigm ( xb0 . dot ( w1 ) ) , b ) ) z1 = np . hstack ( ( sigm ( xb1 . dot ( w1 ) ) , b ) ) y0 = z0 . dot ( w2 ) y1 = z1 . dot ( w2 ) # e = 1 - sigm ( y1 - y0) # dy = e * dsigm ( y1 - y0) e = 1 - ( y1 - y0 ) dy = e / n # Calculate the derivative of the cost function w . r . t . F and w2 where : # F - - weights between the input and h layers # w2 - - weights between the h and output layers dw1 = - ( xb1 . T . dot ( dy . dot ( w2 [ : - 1 ] . reshape ( 1 , h ) ) * dsigm ( xb1 . dot ( w1 ) ) ) - xb0 . T . dot ( dy . dot ( w2 [ : - 1 ] . reshape ( 1 , h ) ) * dsigm ( xb0 . dot ( w1 ) ) ) ) . reshape ( i1 * h ) + self . l1 * w [ : - h1 ] / ( i1 * h ) dw2 = - ( z1 - z0 ) . T . dot ( dy ) . reshape ( h1 ) + self . l2 * w [ - h1 : ] / h1 return np . append ( dw1 , dw2 )
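The body above relies on `sigm` and `dsigm` helpers that are not shown in this section; a minimal sketch under the assumption that they are the logistic function and its derivative taken with respect to the pre-activation:

import numpy as np

def sigm(x):
    # Logistic sigmoid.
    return 1.0 / (1.0 + np.exp(-x))

def dsigm(x):
    # Derivative of the sigmoid evaluated at pre-activation x.
    s = sigm(x)
    return s * (1.0 - s)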
def validate ( self , value , add_comments = False , schema_name = "map" ) : """verbose - also return the jsonschema error details"""
validator = self . get_schema_validator ( schema_name ) error_messages = [ ] if isinstance ( value , list ) : for d in value : error_messages += self . _validate ( d , validator , add_comments , schema_name ) else : error_messages = self . _validate ( value , validator , add_comments , schema_name ) return error_messages
def AttachUserList ( client , ad_group_id , user_list_id ) : """Links the provided ad group and user list . Args : client : an AdWordsClient instance . ad _ group _ id : an int ad group ID . user _ list _ id : an int user list ID . Returns : The ad group criterion that was successfully created ."""
ad_group_criterion_service = client . GetService ( 'AdGroupCriterionService' , 'v201809' ) user_list = { 'xsi_type' : 'CriterionUserList' , 'userListId' : user_list_id } ad_group_criterion = { 'xsi_type' : 'BiddableAdGroupCriterion' , 'criterion' : user_list , 'adGroupId' : ad_group_id } operations = [ { 'operator' : 'ADD' , 'operand' : ad_group_criterion } ] return ad_group_criterion_service . mutate ( operations ) [ 'value' ] [ 0 ]
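A hedged usage sketch for `AttachUserList`; it assumes a googleads.yaml with valid AdWords credentials on the default path, and the IDs are made up:

from googleads import adwords

client = adwords.AdWordsClient.LoadFromStorage()   # reads ~/googleads.yaml by default

# Hypothetical ad group and user list IDs.
criterion = AttachUserList(client, 123456789, 987654321)
print(criterion)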
def read_data ( self , size ) : """Receive data from the device . If the read fails for any reason , an : obj : ` IOError ` exception is raised . : param size : the number of bytes to read . : type size : int : return : the data received . : rtype : list ( int )"""
result = self . dev . bulkRead ( 0x81 , size , timeout = 1200 ) if not result or len ( result ) < size : raise IOError ( 'pywws.device_libusb1.USBDevice.read_data failed' ) # Python2 libusb1 version 1.5 and earlier returns a string if not isinstance ( result [ 0 ] , int ) : result = map ( ord , result ) return list ( result )
def database_rename ( object_id , input_params = { } , always_retry = True , ** kwargs ) : """Invokes the / database - xxxx / rename API method . For more info , see : https : / / wiki . dnanexus . com / API - Specification - v1.0.0 / Name # API - method % 3A - % 2Fclass - xxxx % 2Frename"""
return DXHTTPRequest ( '/%s/rename' % object_id , input_params , always_retry = always_retry , ** kwargs )
def computeStrongestPaths ( self , profile , pairwisePreferences ) : """Returns a two - dimensional dictionary that associates every pair of candidates , cand1 and cand2 , with the strongest path from cand1 to cand2. : ivar Profile profile : A Profile object that represents an election profile . : ivar dict < int , dict < int , int > > pairwisePreferences : A two - dimensional dictionary that associates every pair of candidates , cand1 and cand2 , with number of voters who prefer cand1 to cand2."""
cands = profile . candMap . keys ( ) numCands = len ( cands ) # Initialize the two - dimensional dictionary that will hold our strongest paths . strongestPaths = dict ( ) for cand in cands : strongestPaths [ cand ] = dict ( ) for i in range ( 1 , numCands + 1 ) : for j in range ( 1 , numCands + 1 ) : if ( i == j ) : continue if pairwisePreferences [ i ] [ j ] > pairwisePreferences [ j ] [ i ] : strongestPaths [ i ] [ j ] = pairwisePreferences [ i ] [ j ] else : strongestPaths [ i ] [ j ] = 0 for i in range ( 1 , numCands + 1 ) : for j in range ( 1 , numCands + 1 ) : if ( i == j ) : continue for k in range ( 1 , numCands + 1 ) : if ( i == k or j == k ) : continue strongestPaths [ j ] [ k ] = max ( strongestPaths [ j ] [ k ] , min ( strongestPaths [ j ] [ i ] , strongestPaths [ i ] [ k ] ) ) return strongestPaths
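A small data-shape sketch for the method above. `profile` only needs a `candMap` keyed by candidate numbers 1..n, so a stand-in object (an assumption, not the real Profile class) is enough; the enclosing class is not shown here, so the actual call is left commented:

class TinyProfile:
    # Stand-in exposing only the attribute the method reads.
    candMap = {1: 'A', 2: 'B', 3: 'C'}

# pairwisePreferences[i][j] = number of voters preferring candidate i over j.
prefs = {
    1: {2: 5, 3: 3},
    2: {1: 4, 3: 7},
    3: {1: 6, 2: 2},
}

# strongest = some_rule.computeStrongestPaths(TinyProfile(), prefs)
# Worked by hand, this input yields:
# {1: {2: 5, 3: 5}, 2: {1: 6, 3: 7}, 3: {1: 6, 2: 5}}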
def initialize_plugin ( self ) : """Initialize the plugin"""
# Generate a list of whitelisted packages from the configuration and # store it in the self . whitelist _ package _ names attribute so this # operation doesn ' t end up in the fastpath . if not self . whitelist_package_names : self . whitelist_package_names = self . _determine_unfiltered_package_names ( ) logger . info ( f"Initialized project plugin {self.name}, filtering " + f"{self.whitelist_package_names}" )
def get ( self ) : """Constructs a ExecutionContextContext : returns : twilio . rest . studio . v1 . flow . execution . execution _ context . ExecutionContextContext : rtype : twilio . rest . studio . v1 . flow . execution . execution _ context . ExecutionContextContext"""
return ExecutionContextContext ( self . _version , flow_sid = self . _solution [ 'flow_sid' ] , execution_sid = self . _solution [ 'execution_sid' ] , )
def gc3 ( args ) : """% prog gc3 ksfile cdsfile [ cdsfile2 ] - o newksfile Filter the Ks results to remove high GC3 genes . High GC3 genes are problematic in Ks calculation - see Tang et al . 2010 PNAS . Specifically , the two calculation methods produce drastically different results for these pairs . Therefore we advise removing these high GC3 genes . This is often the case for studying cereal genes . If 2 genomes are involved , the cdsfile of the 2nd genome can be provided concatenated or separated ."""
p = OptionParser ( gc3 . __doc__ ) p . add_option ( "--plot" , default = False , action = "store_true" , help = "Also plot the GC3 histogram [default: %default]" ) p . set_outfile ( ) opts , args = p . parse_args ( args ) outfile = opts . outfile plot = opts . plot if not 1 < len ( args ) < 4 : sys . exit ( not p . print_help ( ) ) ks_file , cdsfile = args [ : 2 ] GC3 = get_GC3 ( cdsfile ) if plot : plot_GC3 ( GC3 , cdsfile , fill = "green" ) if len ( args ) == 3 : cdsfile2 = args [ 2 ] GC3_2 = get_GC3 ( cdsfile2 ) GC3 . update ( GC3_2 ) if plot : plot_GC3 ( GC3_2 , cdsfile2 , fill = "lightgreen" ) data = KsFile ( ks_file ) noriginals = len ( data ) fw = must_open ( outfile , "w" ) writer = csv . writer ( fw ) writer . writerow ( fields . split ( "," ) ) nlines = 0 cutoff = .75 for d in data : a , b = d . name . split ( ";" ) aratio , bratio = GC3 [ a ] , GC3 [ b ] if ( aratio + bratio ) / 2 > cutoff : continue writer . writerow ( d ) nlines += 1 logging . debug ( "{0} records written (from {1})." . format ( nlines , noriginals ) )
def convert_to_categorical ( df ) : """Run a heuristic on the columns of the dataframe to determine whether they contain categorical values . If the heuristic decides a column is categorical , the type of that column is changed . Args : df ( dataframe ) : The dataframe to check for categorical data"""
might_be_categorical = df . select_dtypes ( include = [ object ] ) . columns . tolist ( ) for column in might_be_categorical : if df [ column ] . nunique ( ) < 20 : # Convert the column print ( 'Changing column {:s} to category...' . format ( column ) ) df [ column ] = pd . Categorical ( df [ column ] )
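A quick usage sketch; object-dtype columns with fewer than 20 unique values are converted in place:

import pandas as pd

df = pd.DataFrame({
    'city': ['Berlin', 'Paris', 'Berlin', 'Rome'],   # few unique strings -> categorical
    'price': [10.5, 12.0, 9.9, 11.1],                # numeric, left untouched
})
convert_to_categorical(df)
print(df.dtypes)   # 'city' is now a category dtype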
def remove_connection ( self , interface1 , interface2 ) : """Remove a connection between two interfaces"""
uri = "api/interface/disconnect/%s/%s/" % ( interface1 , interface2 ) return self . delete ( uri )
def match_non_rule_patterns ( fixed_text , cur = 0 ) : """Matches given text at cursor position with non rule patterns Returns a dictionary of three elements : - " matched " - Bool : depending on if match found - " found " - string / None : Value of matched pattern ' s ' find ' key or none - " replaced " : string Replaced string if match found else input string at cursor"""
pattern = exact_find_in_pattern ( fixed_text , cur , NON_RULE_PATTERNS ) if len ( pattern ) > 0 : return { "matched" : True , "found" : pattern [ 0 ] [ 'find' ] , "replaced" : pattern [ 0 ] [ 'replace' ] } else : return { "matched" : False , "found" : None , "replaced" : fixed_text [ cur ] }