def ctrl_c(self, pre_dl=None, post_dl=None):
    """Press Ctrl + C, usually for copy.

    Presses the Ctrl + C key combination, typically used for copying.
    """
    self.delay(pre_dl)
    self.k.press_key(self.k.control_key)
    self.k.tap_key("c")
    self.k.release_key(self.k.control_key)
    self.delay(post_dl)
def _load(cls, prefix, user_agent_config_yaml, user_agent_lookup=None):
    # type: (str, str, Optional[str]) -> str
    """Load user agent YAML file

    Args:
        prefix (str): Text to put at start of user agent
        user_agent_config_yaml (str): Path to user agent YAML file
        user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.

    Returns:
        str: user agent
    """
    if not user_agent_config_yaml:
        user_agent_config_yaml = cls.default_user_agent_config_yaml
        logger.info('No user agent or user agent config file given. Using default user agent config file: %s.' % user_agent_config_yaml)
    if not isfile(user_agent_config_yaml):
        raise UserAgentError("User_agent should be supplied in a YAML config file. It can be your project's name for example.")
    logger.info('Loading user agent config from: %s' % user_agent_config_yaml)
    user_agent_config_dict = load_yaml(user_agent_config_yaml)
    if user_agent_lookup:
        user_agent_config_dict = user_agent_config_dict.get(user_agent_lookup)
    if not user_agent_config_dict:
        raise UserAgentError("No user agent information read from: %s" % user_agent_config_yaml)
    ua = user_agent_config_dict.get('user_agent')
    return cls._construct(user_agent_config_dict, prefix, ua)
def ftr_process ( url = None , content = None , config = None , base_url = None ) : u"""process an URL , or some already fetched content from a given URL . : param url : The URL of article to extract . Can be ` ` None ` ` , but only if you provide both ` ` content ` ` and ` ` config ` ` parameters . : type url : str , unicode or ` ` None ` ` : param content : the HTML content already downloaded . If given , it will be used for extraction , and the ` ` url ` ` parameter will be used only for site config lookup if ` ` config ` ` is not given . Please , only ` ` unicode ` ` to avoid charset errors . : type content : unicode or ` ` None ` ` : param config : if ` ` None ` ` , it will be looked up from ` ` url ` ` with as much love and AI as possible . But don ' t expect too much . : type config : a : class : ` SiteConfig ` instance or ` ` None ` ` : param base _ url : reserved parameter , used when fetching multi - pages URLs . It will hold the base URL ( the first one fetched ) , and will serve as base for fixing non - schemed URLs or query _ string - only links to next page ( s ) . Please do not set this parameter until you very know what you are doing . Default : ` ` None ` ` . : type base _ url : str or unicode or None : raises : - : class : ` RuntimeError ` in all parameters - incompatible situations . Please RFTD carefully , and report strange unicornic edge - cases . - : class : ` SiteConfigNotFound ` if no five - filter site config can be found . - any raw ` ` requests . * ` ` exception , network related , if anything goes wrong during url fetching . : returns : - either a : class : ` ContentExtractor ` instance with extracted ( and : attr : ` . failures ` ) attributes set , in case a site config could be found . When the extractor knows how to handle multiple - pages articles , all pages contents will be extracted and cleaned — if relevant — and concatenated into the instance : attr : ` body ` attribute . The : attr : ` next _ page _ link ` attribute will be a ` ` list ` ` containing all sub - pages links . Note : the first link is the one you fed the extractor with ; it will not be repeated in the list . - or ` ` None ` ` , if content was not given and url fetching returned a non - OK HTTP code , or if no site config could be found ( in that particular case , no extraction at all is performed ) ."""
if url is None and content is None and config is None : raise RuntimeError ( 'At least one of url or the couple content/config ' 'argument must be present.' ) if content is not None and url is None and config is None : raise RuntimeError ( 'Passing content only will not give any result.' ) if content is None : if url is None : raise RuntimeError ( 'When content is unset, url must be set.' ) try : result = requests_get ( url ) if result . status_code != requests . codes . ok : LOGGER . error ( u'Wrong status code in return while getting ' u'“%s”.' , url ) return None # Override before accessing result . text ; see ` requests ` doc . result . encoding = detect_encoding_from_requests_response ( result ) LOGGER . info ( u'Downloaded %s bytes as %s text.' , len ( result . text ) , result . encoding ) # result . text is always unicode content = result . text except : LOGGER . error ( u'Content could not be fetched from URL %s.' , url ) raise if config is None : # This can eventually raise SiteConfigNotFound config_string , matched_host = ftr_get_config ( url ) config = SiteConfig ( site_config_text = config_string , host = matched_host ) extractor = ContentExtractor ( config ) if base_url is None : base_url = url if extractor . process ( html = content ) : # This is recursive . Yeah . if extractor . next_page_link is not None : next_page_link = sanitize_next_page_link ( extractor . next_page_link , base_url ) next_extractor = ftr_process ( url = next_page_link , base_url = base_url ) extractor . body += next_extractor . body extractor . next_page_link = [ next_page_link ] if next_extractor . next_page_link is not None : extractor . next_page_link . extend ( next_extractor . next_page_link ) return extractor return None
def _delete_dir(self):
    """Delete the old folder, if it exists, before starting the build"""
    if not self.auto and os.path.isdir(self.meta.build_path + self.prgnam):
        shutil.rmtree(self.meta.build_path + self.prgnam)
def k(self, symbol='', begin=None, end=None):
    """Read K-line (candlestick) data.

    :param symbol:
    :param begin:
    :param end:
    :return: pd.DataFrame or None
    """
    with self.client.connect(*self.bestip):
        data = self.client.get_k_data(symbol, begin, end)
        return data
def ReportConfiguration(self, file):
    """
    :param file: Destination for report details
    :return: None
    """
    global encodingpar
    print >> file, BuildReportLine("FAM FILE", self.fam_details)
    print >> file, BuildReportLine("IMPUTE_ARCHIVES",
                                   "%s:%s" % (str(self.chroms[0]), self.archives[0]))
    idx = 0
    for arch in self.archives[1:]:
        print >> file, BuildReportLine("", "%s:%s" % (str(self.chroms[idx + 1]), arch))
        idx += 1
    print >> file, BuildReportLine("ENCODING",
                                   ["Additive", "Dominant", "Recessive", "Genotype", "Raw"][encoding])
    print >> file, BuildReportLine("INFO-EXT", Parser.info_ext)
    print >> file, BuildReportLine("INFO-THRESH", Parser.info_threshold)
def setup_method_options(method, tuning_options):
    """prepare method specific options"""
    kwargs = {}
    # pass size of parameter space as max iterations to methods that support it
    # it seems not all methods interpret this value in the same manner
    maxiter = numpy.prod([len(v) for v in tuning_options.tune_params.values()])
    kwargs['maxiter'] = maxiter
    if method in ["Nelder-Mead", "Powell"]:
        kwargs['maxfev'] = maxiter
    elif method == "L-BFGS-B":
        kwargs['maxfun'] = maxiter
    # pass eps to methods that support it
    if method in ["CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"]:
        kwargs['eps'] = tuning_options.eps
    elif method == "COBYLA":
        kwargs['rhobeg'] = tuning_options.eps
    return kwargs
def _get_field_schema(self):
    """Get a list of all of the default fields for this query type.

    If data is available in the monitor type, a list of field definitions
    will be returned ahead of the actual data, providing insight into the
    available fields. If no data is available in a monitor, this will block
    on recv().

    :return: list of dictionary fields with the field schema
    """
    self.update_format(DetailedFormat())
    for fields in self.execute():
        if 'fields' in fields:
            return fields['fields']
def init(driverName=None, debug=False):
    """Constructs a new TTS engine instance or reuses the existing instance
    for the driver name.

    @param driverName: Name of the platform specific driver to use. If None,
        selects the default driver for the operating system.
    @type: str
    @param debug: Debugging output enabled or not
    @type debug: bool
    @return: Engine instance
    @rtype: L{engine.Engine}
    """
    try:
        eng = _activeEngines[driverName]
    except KeyError:
        eng = Engine(driverName, debug)
        _activeEngines[driverName] = eng
    return eng
def star(n, alpha='faced', center=(1, 1)):
    """Create the star points of various design matrices

    Parameters
    ----------
    n : int
        The number of variables in the design

    Optional
    --------
    alpha : str
        Available values are 'faced' (default), 'orthogonal', or 'rotatable'
    center : array
        A 1-by-2 array of integers indicating the number of center points
        assigned in each block of the response surface design. Default is (1, 1).

    Returns
    -------
    H : 2d-array
        The star-point portion of the design matrix (i.e. at +/- alpha)
    a : scalar
        The alpha value to scale the star points with.

    Example
    -------
    >>> star(3)
    array([[-1.,  0.,  0.],
           [ 1.,  0.,  0.],
           [ 0., -1.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0., -1.],
           [ 0.,  0.,  1.]])
    """
    # Star points at the center of each face of the factorial
    if alpha == 'faced':
        a = 1
    elif alpha == 'orthogonal':
        nc = 2 ** n      # factorial points
        nco = center[0]  # center points to factorial
        na = 2 * n       # axial points
        nao = center[1]  # center points to axial design
        # value of alpha in orthogonal design
        a = (n * (1 + nao / float(na)) / (1 + nco / float(nc))) ** 0.5
    elif alpha == 'rotatable':
        nc = 2 ** n      # number of factorial points
        a = nc ** 0.25   # value of alpha in rotatable design
    else:
        raise ValueError('Invalid value for "alpha": {:}'.format(alpha))

    # Create the actual matrix now.
    H = np.zeros((2 * n, n))
    for i in range(n):
        H[2 * i:2 * i + 2, i] = [-1, 1]
    H *= a

    return H, a
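# A stand-alone sketch of the alpha scaling above for a two-factor design (n = 2),
# assuming only numpy; the values below are illustrative, not taken from this codebase.
import numpy as np

n = 2
a_faced = 1.0                    # 'faced': star points sit on the faces of the cube
a_rotatable = (2 ** n) ** 0.25   # 'rotatable': alpha = (2**n)**(1/4), about 1.414 for n = 2

H = np.zeros((2 * n, n))
for i in range(n):
    H[2 * i:2 * i + 2, i] = [-1, 1]

print(H * a_faced)       # [[-1, 0], [1, 0], [0, -1], [0, 1]]
print(H * a_rotatable)   # same pattern scaled by ~1.414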
async def get_authenticated_user ( self , http_client : httpclient . AsyncHTTPClient = None ) -> Dict [ str , Any ] : """Gets the OAuth authorized user and access token . This method should be called from the handler for your OAuth callback URL to complete the registration process . We run the callback with the authenticated user dictionary . This dictionary will contain an ` ` access _ key ` ` which can be used to make authorized requests to this service on behalf of the user . The dictionary will also contain other fields such as ` ` name ` ` , depending on the service used . . . versionchanged : : 6.0 The ` ` callback ` ` argument was removed . Use the returned awaitable object instead ."""
handler = cast ( RequestHandler , self ) request_key = escape . utf8 ( handler . get_argument ( "oauth_token" ) ) oauth_verifier = handler . get_argument ( "oauth_verifier" , None ) request_cookie = handler . get_cookie ( "_oauth_request_token" ) if not request_cookie : raise AuthError ( "Missing OAuth request token cookie" ) handler . clear_cookie ( "_oauth_request_token" ) cookie_key , cookie_secret = [ base64 . b64decode ( escape . utf8 ( i ) ) for i in request_cookie . split ( "|" ) ] if cookie_key != request_key : raise AuthError ( "Request token does not match cookie" ) token = dict ( key = cookie_key , secret = cookie_secret ) # type : Dict [ str , Union [ str , bytes ] ] if oauth_verifier : token [ "verifier" ] = oauth_verifier if http_client is None : http_client = self . get_auth_http_client ( ) assert http_client is not None response = await http_client . fetch ( self . _oauth_access_token_url ( token ) ) access_token = _oauth_parse_response ( response . body ) user = await self . _oauth_get_user_future ( access_token ) if not user : raise AuthError ( "Error getting user" ) user [ "access_token" ] = access_token return user
def set_cmap_cb(self, w, index):
    """This callback is invoked when the user selects a new color map
    from the preferences pane."""
    name = cmap.get_names()[index]
    self.t_.set(color_map=name)
def _get_si():
    """Authenticate with vCenter server and return service instance object."""
    url = config.get_cloud_config_value(
        'url', get_configured_provider(), __opts__, search_global=False
    )
    username = config.get_cloud_config_value(
        'user', get_configured_provider(), __opts__, search_global=False
    )
    password = config.get_cloud_config_value(
        'password', get_configured_provider(), __opts__, search_global=False
    )
    protocol = config.get_cloud_config_value(
        'protocol', get_configured_provider(), __opts__, search_global=False, default='https'
    )
    port = config.get_cloud_config_value(
        'port', get_configured_provider(), __opts__, search_global=False, default=443
    )
    return salt.utils.vmware.get_service_instance(
        url, username, password, protocol=protocol, port=port
    )
def score_large_straight_yatzy(dice: List[int]) -> int:
    """Large straight scoring according to yatzy rules"""
    dice_set = set(dice)
    if _are_two_sets_equal({2, 3, 4, 5, 6}, dice_set):
        return sum(dice)
    return 0
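# Self-contained sketch of the same rule; the _are_two_sets_equal helper lives
# elsewhere in this codebase, so it is inlined as a plain set comparison here.
from typing import List

def score_large_straight(dice: List[int]) -> int:
    # Large straight in yatzy: the five dice are exactly 2, 3, 4, 5, 6 (scores 20).
    if set(dice) == {2, 3, 4, 5, 6}:
        return sum(dice)
    return 0

print(score_large_straight([2, 3, 4, 5, 6]))  # 20
print(score_large_straight([1, 2, 3, 4, 5]))  # 0 (that is the small straight)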
def _sheet_meta_from_prompts ( sheets , old_name , name , ct_paleo , ct_chron ) : """Guide the user to create a proper , standardized sheet name : param list sheets : Running list of sheet metadata : param str old _ name : Original sheet name : param str name : Data set name : param int ct _ paleo : Running count of paleoData tables : param int ct _ chron : Running count of chronData tables : return sheets paleo _ ct chron _ ct : Updated sheets and counts"""
cont = True # Loop until valid sheet name is built , or user gives up while cont : try : pc = input ( "Is this a (p)aleo or (c)hronology sheet?" ) . lower ( ) if pc in ( "p" , "c" , "paleo" , "chron" , "chronology" ) : tt = input ( "Is this a (d)istribution, (e)nsemble, (m)easurement, or (s)ummary sheet?" ) . lower ( ) if tt in EXCEL_SHEET_TYPES [ "distribution" ] or tt in EXCEL_SHEET_TYPES [ "ensemble" ] or tt in EXCEL_SHEET_TYPES [ "summary" ] or tt in EXCEL_SHEET_TYPES [ "measurement" ] : # valid answer , keep going if tt in EXCEL_SHEET_TYPES [ "distribution" ] : tt = "distribution" elif tt in EXCEL_SHEET_TYPES [ "summary" ] : tt = "summary" elif tt in EXCEL_SHEET_TYPES [ "ensemble" ] : tt = "ensemble" elif tt in EXCEL_SHEET_TYPES [ "measurement" ] : tt = "measurement" if pc in EXCEL_SHEET_TYPES [ "paleo" ] : if tt in [ "ensemble" , "summary" ] : sheet = "{}{}{}{}" . format ( "paleo" , ct_paleo , tt , 1 ) else : sheet = "{}{}{}" . format ( "paleo" , ct_paleo , tt ) elif pc in EXCEL_SHEET_TYPES [ "chron" ] : if tt in [ "ensemble" , "summary" ] : sheet = "{}{}{}{}" . format ( "chron" , ct_chron , tt , 1 ) else : sheet = "{}{}{}" . format ( "chron" , ct_chron , tt ) # Test the sheet that was built from the user responses . # If it matches the Regex , then continue to build the sheet metadata . If not , try again or skip sheet . m = re . match ( re_sheet , sheet . lower ( ) ) if m : sheets , ct_paleo , ct_chron = _sheet_meta_from_regex ( m , sheets , old_name , name , ct_paleo , ct_chron ) print ( "Sheet created: {}" . format ( sheet ) ) cont = False else : resp = input ( "invalid sheet name. try again? (y/n): " ) if resp == "n" : print ( "No valid sheet name was created. Skipping sheet: {}" . format ( sheet ) ) cont = False except Exception as e : logger_excel . debug ( "excel: sheet_meta_from_prompts: error during prompts, {}" . format ( e ) ) cont = False print ( "=====================================================" ) return sheets , ct_paleo , ct_chron
def start_client(self, event=None):
    """
    Negotiate a new SSH2 session as a client. This is the first step after
    creating a new L{Transport}. A separate thread is created for protocol
    negotiation.

    If an event is passed in, this method returns immediately. When
    negotiation is done (successful or not), the given C{Event} will be
    triggered. On failure, L{is_active} will return C{False}. (Since 1.4)
    If C{event} is C{None}, this method will not return until negotiation
    is done. On success, the method returns normally. Otherwise an
    SSHException is raised.

    After a successful negotiation, you will usually want to authenticate,
    calling L{auth_password <Transport.auth_password>} or
    L{auth_publickey <Transport.auth_publickey>}.

    @note: L{connect} is a simpler method for connecting as a client.

    @note: After calling this method (or L{start_server} or L{connect}),
        you should no longer directly read from or write to the original
        socket object.

    @param event: an event to trigger when negotiation is complete (optional)
    @type event: threading.Event

    @raise SSHException: if negotiation fails (and no C{event} was passed in)
    """
    self.active = True
    if event is not None:
        # async, return immediately and let the app poll for completion
        self.completion_event = event
        self.start()
        return

    # synchronous, wait for a result
    self.completion_event = event = threading.Event()
    self.start()
    Random.atfork()
    while True:
        event.wait(0.1)
        if not self.active:
            e = self.get_exception()
            if e is not None:
                raise e
            raise SSHException('Negotiation failed.')
        if event.isSet():
            break
def find_include_file(self, t):
    """Finds the #include file for a given preprocessor tuple."""
    fname = t[2]
    for d in self.searchpath[t[1]]:
        if d == os.curdir:
            f = fname
        else:
            f = os.path.join(d, fname)
        if os.path.isfile(f):
            return f
    return None
def _unique_constrains(cls):
    """Get all (single column and multi column) unique constraints"""
    unique = [{c.name for c in u.columns} for u in cls.__table_args__
              if isinstance(u, UniqueConstraint)]
    unique.extend({c.name} for c in cls.__table__.columns if c.unique)
    return unique
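# Hedged sketch of the same lookup against a toy SQLAlchemy model; the model,
# its columns and the constraint below are invented purely for illustration.
from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Account(Base):
    __tablename__ = 'account'
    __table_args__ = (UniqueConstraint('tenant', 'login'),)  # multi-column constraint
    id = Column(Integer, primary_key=True)
    tenant = Column(String)
    login = Column(String)
    email = Column(String, unique=True)                      # single-column constraint

def unique_constraints(cls):
    # Same shape of result as the helper above: a list of column-name sets.
    unique = [{c.name for c in u.columns}
              for u in cls.__table_args__ if isinstance(u, UniqueConstraint)]
    unique.extend({c.name} for c in cls.__table__.columns if c.unique)
    return unique

print(unique_constraints(Account))  # e.g. [{'tenant', 'login'}, {'email'}]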
def validate(self, value):
    """Validate value.

    Args:
        value: model value.

    Returns:
        Whether the specified value is valid data type value.

    Raises:
        BadValueError: when value is not of self.data_type type.
    """
    if value is not None and not isinstance(value, self.data_type):
        raise datastore_errors.BadValueError(
            "Property %s must be convertible to a %s instance (%s)" %
            (self.name, self.data_type, value))
    return super(JsonProperty, self).validate(value)
def coordinator_dead(self, error):
    """Mark the current coordinator as dead."""
    if self.coordinator_id is not None:
        log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                    self.coordinator_id, self.group_id, error)
        self.coordinator_id = None
def _formatOntologyTermObject(self, terms, element_type):
    """Formats the ontology term object for query"""
    elementClause = None
    if not isinstance(terms, collections.Iterable):
        terms = [terms]
    elements = []
    for term in terms:
        if term.term_id:
            elements.append('?{} = <{}> '.format(element_type, term.term_id))
        else:
            elements.append('?{} = <{}> '.format(
                element_type, self._toNamespaceURL(term.term)))
    elementClause = "({})".format(" || ".join(elements))
    return elementClause
def encrypt(self, k, a, m):
    """Encrypt according to the selected encryption and hashing functions.

    :param k: Encryption key (optional)
    :param a: Additional Authentication Data
    :param m: Plaintext

    Returns a dictionary with the computed data.
    """
    iv = _randombits(96)
    cipher = Cipher(algorithms.AES(k), modes.GCM(iv), backend=self.backend)
    encryptor = cipher.encryptor()
    encryptor.authenticate_additional_data(a)
    e = encryptor.update(m) + encryptor.finalize()
    return (iv, e, encryptor.tag)
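# For context, a minimal stand-alone AES-GCM round trip with the `cryptography`
# package, mirroring the calls above; the key, nonce and messages are made up,
# and os.urandom stands in for the _randombits helper used by this class.
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = os.urandom(32)       # AES-256 key
iv = os.urandom(12)        # 96-bit nonce, as in the helper above
aad = b"header"            # additional authenticated data
plaintext = b"attack at dawn"

encryptor = Cipher(algorithms.AES(key), modes.GCM(iv)).encryptor()
encryptor.authenticate_additional_data(aad)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()

decryptor = Cipher(algorithms.AES(key), modes.GCM(iv, encryptor.tag)).decryptor()
decryptor.authenticate_additional_data(aad)
assert decryptor.update(ciphertext) + decryptor.finalize() == plaintext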
def addons(cls, recurse=True):
    """
    Returns a dictionary containing all the available addons for this
    mixin class. If the optional recurse flag is set to True, then all
    the base classes will be searched for the given addon as well.

    :param      recurse | <bool>

    :return     {<str> name: <variant> addon, ..}
    """
    cls.initAddons()
    prop = '_{0}__addons'.format(cls.__name__)
    out = {}

    # lookup base classes
    if recurse:
        for base in cls.__bases__:
            if issubclass(base, AddonManager):
                out.update(base.addons(recurse))

    # always use the highest level for any given key
    out.update(getattr(cls, prop, {}))
    return out
async def workerTypeHealth(self, *args, **kwargs):
    """Look up the resource health for a workerType

    Return a view of the health of a given worker type

    This method gives output: ``v1/health.json#``

    This method is ``experimental``
    """
    return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs)
def submit_as_gauge_and_monotonic_count(self, metric_suffix, metric, scraper_config):
    """submit a kube_dns metric both as a gauge (for compatibility) and as a monotonic_count"""
    metric_name = scraper_config['namespace'] + metric_suffix
    for sample in metric.samples:
        # Explicit shallow copy of the instance tags
        _tags = list(scraper_config['custom_tags'])

        for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
            _tags.append('{}:{}'.format(label_name, label_value))

        # submit raw metric
        self.gauge(metric_name, sample[self.SAMPLE_VALUE], _tags)

        # submit rate metric
        self.monotonic_count(metric_name + '.count', sample[self.SAMPLE_VALUE], _tags)
def median(self, *args, **kwargs):
    """
    geo.median(axis=None, out=None, overwrite_input=False)

    axis : int, optional
        Axis along which the medians are computed. The default (axis=None) is
        to compute the median along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the type
        (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array (a) for calculations.
        The input array will be modified by the call to median. This will save
        memory when you do not need to preserve the contents of the input
        array. Treat the input as undefined, but it will probably be fully or
        partially sorted. Default is False. Note that, if `overwrite_input` is
        True and the input is not already an ndarray, an error will be raised.
    """
    return np.ma.median(self.raster, *args, **kwargs)
def get_endpoint_map(self):
    """returns API version and endpoint map"""
    log.debug("getting end points...")
    cmd, url = DEVICE_URLS["get_endpoint_map"]
    return self._exec(cmd, url)
def to(self, *args):
    """
    get/set the 'device' to which messages are sent.

    Valid targets are:

        string filenames:   '/tmp/test.log'
        remote hosts:       'pretoria:1701'
        system devices:     sys.stdout, sys.stderr
        special names:      'stdout'
        file handles:       open('/tmp/test.log')
    """
    if len(args):
        self._logFile = args[0]
        if self._logHandle and self._logHandle != sys.stdout:
            self._logHandle.close()
        # if type(self._logFile) is types.FileType:
        if isinstance(self._logFile, IOBase):
            self._logHandle = self._logFile
        elif self._logFile == 'stdout':
            self._logHandle = sys.stdout
        elif self.socket_parse(self._logFile):
            self._logHandle = C_dgmsocket(self._socketRemote, int(self._socketPort))
        else:
            self._logHandle = open(self._logFile, "a")
        self._sys_stdout = self._logHandle
    else:
        return self._logFile
def webui_schematics_panels_panel_properties_height(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
    schematics = ET.SubElement(webui, "schematics")
    panels = ET.SubElement(schematics, "panels")
    panel = ET.SubElement(panels, "panel")
    name_key = ET.SubElement(panel, "name")
    name_key.text = kwargs.pop('name')
    properties = ET.SubElement(panel, "properties")
    height = ET.SubElement(properties, "height")
    height.text = kwargs.pop('height')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def __add_symbols(self, cmd):
    """Add all additional defined and undefined symbols."""
    if self.__config.define_symbols:
        symbols = self.__config.define_symbols
        cmd.append(''.join([' -D"%s"' % def_symbol for def_symbol in symbols]))
    if self.__config.undefine_symbols:
        un_symbols = self.__config.undefine_symbols
        cmd.append(''.join([' -U"%s"' % undef_symbol for undef_symbol in un_symbols]))
    return cmd
def make_all_dirs(path, mode=0o777):
    """Ensure local dir, with all its parent dirs, are created.

    Unlike os.makedirs(), will not fail if the path already exists.
    """
    # Avoid races inherent to doing this in two steps (check then create).
    # Python 3 has exist_ok but the approach below works for Python 2 + 3.
    # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
    return path
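# Quick self-contained exercise of an mkdir-p helper like the one above;
# the paths are throwaway temporary locations, not anything from this codebase.
import errno
import os
import tempfile

def make_all_dirs(path, mode=0o777):
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        if not (e.errno == errno.EEXIST and os.path.isdir(path)):
            raise
    return path

root = tempfile.mkdtemp()
target = os.path.join(root, "a", "b", "c")
make_all_dirs(target)          # creates the whole chain
make_all_dirs(target)          # second call is a no-op, no exception
print(os.path.isdir(target))   # True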
def _float(value):
    """Conversion of state vector field, with automatic unit handling"""
    if "[" in value:
        # There is a unit field
        value, sep, unit = value.partition("[")
        unit = sep + unit

        # As defined in the CCSDS Orbital Data Message Blue Book, the unit should
        # be the same as defined in table 3-3 which are for km and km/s for position
        # and velocity respectively. Thus, there should be no other conversion to make
        if unit in ("[km]", "[km/s]"):
            multiplier = 1000
        elif unit == "[s]":
            multiplier = 1
        else:
            raise ValueError("Unknown unit for this field", unit)
    else:
        # if no unit is provided, the default is km, and km/s
        multiplier = 1000

    return float(value) * multiplier
def setup(app):
    """Required Sphinx extension setup function."""
    app.add_role('bokeh-commit', bokeh_commit)
    app.add_role('bokeh-issue', bokeh_issue)
    app.add_role('bokeh-pull', bokeh_pull)
    app.add_role('bokeh-tree', bokeh_tree)
def check_service ( self , info ) : """Checks the network for a unique service name , modifying the ServiceInfo passed in if it is not unique ."""
now = current_time_millis ( ) next_time = now i = 0 while i < 3 : for record in self . cache . entries_with_name ( info . type ) : if record . type == _TYPE_PTR and not record . is_expired ( now ) and record . alias == info . name : if ( info . name . find ( '.' ) < 0 ) : info . name = info . name + ".[" + info . address + ":" + info . port + "]." + info . type self . check_service ( info ) return raise NonUniqueNameException if now < next_time : self . wait ( next_time - now ) now = current_time_millis ( ) continue out = DNSOutgoing ( _FLAGS_QR_QUERY | _FLAGS_AA ) self . debug = out out . add_question ( DNSQuestion ( info . type , _TYPE_PTR , _CLASS_IN ) ) out . add_authorative_answer ( DNSPointer ( info . type , _TYPE_PTR , _CLASS_IN , info . ttl , info . name ) ) self . send ( out ) i += 1 next_time += _CHECK_TIME
def unzip(input_layer, split_dim=0, num_splits=2):
    """Unzips this Tensor along the split_dim into num_splits Equal chunks.

    Examples:

    * `[1, 2, 3, 4] -> [1, 3], [2, 4]`
    * `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [3, 3]], [[2, 2], [4, 4]]`

    Args:
      input_layer: The chainable object, supplied.
      split_dim: The dimension to split along. Defaults to batch.
      num_splits: The number of splits.

    Returns:
      A list of PrettyTensors.

    Raises:
      ValueError: If split_dim is out of range or isn't divided evenly by
        num_splits.
    """
    shape = input_layer.shape
    _check_split_dims(num_splits, split_dim, shape)
    splits = functions.unzip(input_layer, split_dim, shape[split_dim], num_splits)
    return input_layer.with_sequence(splits)
def unapostrophe(text):
    """Strip apostrophe and 's' from the end of a string."""
    text = re.sub(r'[%s]s?$' % ''.join(APOSTROPHES), '', text)
    return text
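# Self-contained version; the APOSTROPHES constant is defined elsewhere in this
# codebase, so the straight/curly pair below is only an illustrative assumption.
import re

APOSTROPHES = ["'", "\u2019"]   # straight apostrophe and right single quotation mark

def unapostrophe(text):
    return re.sub(r"[%s]s?$" % "".join(APOSTROPHES), "", text)

print(unapostrophe("Alice's"))      # Alice
print(unapostrophe("James\u2019"))  # James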
def clean_dictkeys ( ddict , exclusions = None ) : """Exclude chars in dict keys and return a clean dictionary ."""
exclusions = [ ] if exclusions is None else exclusions if not isinstance ( ddict , dict ) : return { } for key in list ( ddict . keys ( ) ) : if [ incl for incl in exclusions if incl in key ] : data = ddict . pop ( key ) clean_key = exclude_chars ( key , exclusions ) if clean_key : if clean_key in ddict : ddict [ clean_key ] = force_list ( ddict [ clean_key ] ) add_element ( ddict , clean_key , data ) else : ddict [ clean_key ] = data # dict case if isinstance ( ddict . get ( key ) , dict ) : ddict [ key ] = clean_dictkeys ( ddict [ key ] , exclusions ) # list case elif isinstance ( ddict . get ( key ) , list ) : for row in ddict [ key ] : if isinstance ( row , dict ) : row = clean_dictkeys ( row , exclusions ) return ddict
def scp_put ( files , remote_path = None , recursive = False , preserve_times = False , saltenv = 'base' , ** kwargs ) : '''. . versionadded : : 2019.2.0 Transfer files and directories to remote network device . . . note : : This function is only available only when the underlying library ` scp < https : / / github . com / jbardin / scp . py > ` _ is installed . See : mod : ` scp module < salt . modules . scp _ mod > ` for more details . files A single path or a list of paths to be transferred . remote _ path The path on the remote device where to store the files . recursive : ` ` True ` ` Transfer files and directories recursively . preserve _ times : ` ` False ` ` Preserve ` ` mtime ` ` and ` ` atime ` ` of transferred files and directories . saltenv : ` ` base ` ` The name of the Salt environment . Ignored when ` ` files ` ` is not a ` ` salt : / / ` ` URL . hostname The hostname of the remote device . port : ` ` 22 ` ` The port of the remote device . username The username required for SSH authentication on the device . password Used for password authentication . It is also used for private key decryption if ` ` passphrase ` ` is not given . passphrase Used for decrypting private keys . pkey An optional private key to use for authentication . key _ filename The filename , or list of filenames , of optional private key ( s ) and / or certificates to try for authentication . timeout An optional timeout ( in seconds ) for the TCP connect . socket _ timeout : ` ` 10 ` ` The channel socket timeout in seconds . buff _ size : ` ` 16384 ` ` The size of the SCP send buffer . allow _ agent : ` ` True ` ` Set to ` ` False ` ` to disable connecting to the SSH agent . look _ for _ keys : ` ` True ` ` Set to ` ` False ` ` to disable searching for discoverable private key files in ` ` ~ / . ssh / ` ` banner _ timeout An optional timeout ( in seconds ) to wait for the SSH banner to be presented . auth _ timeout An optional timeout ( in seconds ) to wait for an authentication response . auto _ add _ policy : ` ` False ` ` Automatically add the host to the ` ` known _ hosts ` ` . CLI Example : . . code - block : : bash salt ' * ' napalm . scp _ put / path / to / file / var / tmp / file auto _ add _ policy = True'''
conn_args = netmiko_args ( ** kwargs ) conn_args [ 'hostname' ] = conn_args [ 'host' ] kwargs . update ( conn_args ) return __salt__ [ 'scp.put' ] ( files , remote_path = remote_path , recursive = recursive , preserve_times = preserve_times , saltenv = saltenv , ** kwargs )
def bind(self, sock):
    """Wrap and return the given socket."""
    if self.context is None:
        self.context = self.get_context()
    conn = SSLConnection(self.context, sock)
    self._environ = self.get_environ()
    return conn
def connect_put_node_proxy_with_path(self, name, path, **kwargs):
    """connect PUT requests to proxy of Node

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_put_node_proxy_with_path(name, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the NodeProxyOptions (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to node.
    :return: str
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.connect_put_node_proxy_with_path_with_http_info(name, path, **kwargs)
    else:
        (data) = self.connect_put_node_proxy_with_path_with_http_info(name, path, **kwargs)
        return data
def get_tdms_files(directory):
    """Recursively find projects based on '.tdms' file endings

    Searches the `directory` recursively and returns a sorted list of all
    found '.tdms' project files, except fluorescence data trace files which
    end with `_traces.tdms`.
    """
    path = pathlib.Path(directory).resolve()
    # get all tdms files
    tdmslist = [r for r in path.rglob("*.tdms") if r.is_file()]
    # exclude traces files
    tdmslist = [r for r in tdmslist if not r.name.endswith("_traces.tdms")]
    return sorted(tdmslist)
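# Runnable sketch of the same filtering over a throwaway directory tree
# (the file names below are invented).
import pathlib
import tempfile

root = pathlib.Path(tempfile.mkdtemp())
(root / "sub").mkdir()
for name in ["run1.tdms", "sub/run2.tdms", "sub/run2_traces.tdms", "notes.txt"]:
    (root / name).touch()

tdms = sorted(p for p in root.rglob("*.tdms")
              if p.is_file() and not p.name.endswith("_traces.tdms"))
print([p.name for p in tdms])  # ['run1.tdms', 'run2.tdms']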
def reset_object(self, driver_wrapper=None):
    """Reset each page element object

    :param driver_wrapper: driver wrapper instance
    """
    if driver_wrapper:
        self.driver_wrapper = driver_wrapper
    for element in self._page_elements:
        element.reset_object(driver_wrapper)
    self._web_elements = []
    self._page_elements = []
def seen_tasks(self):
    """Shows a list of seen task types."""
    print('\n'.join(self._stub.seen_tasks(clearly_pb2.Empty()).task_types))
def removeSinglePixels(img):
    """
    img - boolean array
    remove all pixels that have no neighbour
    """
    gx = img.shape[0]
    gy = img.shape[1]

    for i in range(gx):
        for j in range(gy):
            if img[i, j]:
                found_neighbour = False
                for ii in range(max(0, i - 1), min(gx, i + 2)):
                    for jj in range(max(0, j - 1), min(gy, j + 2)):
                        if ii == i and jj == j:
                            continue
                        if img[ii, jj]:
                            found_neighbour = True
                            break
                    if found_neighbour:
                        break
                if not found_neighbour:
                    img[i, j] = 0
def speakerDiarization ( filename , n_speakers , mt_size = 2.0 , mt_step = 0.2 , st_win = 0.05 , lda_dim = 35 , plot_res = False ) : '''ARGUMENTS : - filename : the name of the WAV file to be analyzed - n _ speakers the number of speakers ( clusters ) in the recording ( < = 0 for unknown ) - mt _ size ( opt ) mid - term window size - mt _ step ( opt ) mid - term window step - st _ win ( opt ) short - term window size - lda _ dim ( opt ) LDA dimension ( 0 for no LDA ) - plot _ res ( opt ) 0 for not plotting the results 1 for plottingy'''
[ fs , x ] = audioBasicIO . readAudioFile ( filename ) x = audioBasicIO . stereo2mono ( x ) duration = len ( x ) / fs [ classifier_1 , MEAN1 , STD1 , classNames1 , mtWin1 , mtStep1 , stWin1 , stStep1 , computeBEAT1 ] = aT . load_model_knn ( os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , "data" , "knnSpeakerAll" ) ) [ classifier_2 , MEAN2 , STD2 , classNames2 , mtWin2 , mtStep2 , stWin2 , stStep2 , computeBEAT2 ] = aT . load_model_knn ( os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , "data" , "knnSpeakerFemaleMale" ) ) [ mt_feats , st_feats , _ ] = aF . mtFeatureExtraction ( x , fs , mt_size * fs , mt_step * fs , round ( fs * st_win ) , round ( fs * st_win * 0.5 ) ) MidTermFeatures2 = numpy . zeros ( ( mt_feats . shape [ 0 ] + len ( classNames1 ) + len ( classNames2 ) , mt_feats . shape [ 1 ] ) ) for i in range ( mt_feats . shape [ 1 ] ) : cur_f1 = ( mt_feats [ : , i ] - MEAN1 ) / STD1 cur_f2 = ( mt_feats [ : , i ] - MEAN2 ) / STD2 [ res , P1 ] = aT . classifierWrapper ( classifier_1 , "knn" , cur_f1 ) [ res , P2 ] = aT . classifierWrapper ( classifier_2 , "knn" , cur_f2 ) MidTermFeatures2 [ 0 : mt_feats . shape [ 0 ] , i ] = mt_feats [ : , i ] MidTermFeatures2 [ mt_feats . shape [ 0 ] : mt_feats . shape [ 0 ] + len ( classNames1 ) , i ] = P1 + 0.0001 MidTermFeatures2 [ mt_feats . shape [ 0 ] + len ( classNames1 ) : : , i ] = P2 + 0.0001 mt_feats = MidTermFeatures2 # TODO iFeaturesSelect = [ 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 , 20 , 41 , 42 , 43 , 44 , 45 , 46 , 47 , 48 , 49 , 50 , 51 , 52 , 53 ] mt_feats = mt_feats [ iFeaturesSelect , : ] ( mt_feats_norm , MEAN , STD ) = aT . normalizeFeatures ( [ mt_feats . T ] ) mt_feats_norm = mt_feats_norm [ 0 ] . T n_wins = mt_feats . shape [ 1 ] # remove outliers : dist_all = numpy . sum ( distance . squareform ( distance . pdist ( mt_feats_norm . T ) ) , axis = 0 ) m_dist_all = numpy . mean ( dist_all ) i_non_outliers = numpy . nonzero ( dist_all < 1.2 * m_dist_all ) [ 0 ] # TODO : Combine energy threshold for outlier removal : # EnergyMin = numpy . min ( mt _ feats [ 1 , : ] ) # EnergyMean = numpy . mean ( mt _ feats [ 1 , : ] ) # Thres = ( 1.5 * EnergyMin + 0.5 * EnergyMean ) / 2.0 # i _ non _ outliers = numpy . nonzero ( mt _ feats [ 1 , : ] > Thres ) [ 0] # print i _ non _ outliers perOutLier = ( 100.0 * ( n_wins - i_non_outliers . shape [ 0 ] ) ) / n_wins mt_feats_norm_or = mt_feats_norm mt_feats_norm = mt_feats_norm [ : , i_non_outliers ] # LDA dimensionality reduction : if lda_dim > 0 : # [ mt _ feats _ to _ red , _ , _ ] = aF . mtFeatureExtraction ( x , fs , mt _ size * fs , st _ win * fs , round ( fs * st _ win ) , round ( fs * st _ win ) ) ; # extract mid - term features with minimum step : mt_win_ratio = int ( round ( mt_size / st_win ) ) mt_step_ratio = int ( round ( st_win / st_win ) ) mt_feats_to_red = [ ] num_of_features = len ( st_feats ) num_of_stats = 2 # for i in range ( num _ of _ stats * num _ of _ features + 1 ) : for i in range ( num_of_stats * num_of_features ) : mt_feats_to_red . append ( [ ] ) for i in range ( num_of_features ) : # for each of the short - term features : curPos = 0 N = len ( st_feats [ i ] ) while ( curPos < N ) : N1 = curPos N2 = curPos + mt_win_ratio if N2 > N : N2 = N curStFeatures = st_feats [ i ] [ N1 : N2 ] mt_feats_to_red [ i ] . append ( numpy . mean ( curStFeatures ) ) mt_feats_to_red [ i + num_of_features ] . append ( numpy . std ( curStFeatures ) ) curPos += mt_step_ratio mt_feats_to_red = numpy . 
array ( mt_feats_to_red ) mt_feats_to_red_2 = numpy . zeros ( ( mt_feats_to_red . shape [ 0 ] + len ( classNames1 ) + len ( classNames2 ) , mt_feats_to_red . shape [ 1 ] ) ) for i in range ( mt_feats_to_red . shape [ 1 ] ) : cur_f1 = ( mt_feats_to_red [ : , i ] - MEAN1 ) / STD1 cur_f2 = ( mt_feats_to_red [ : , i ] - MEAN2 ) / STD2 [ res , P1 ] = aT . classifierWrapper ( classifier_1 , "knn" , cur_f1 ) [ res , P2 ] = aT . classifierWrapper ( classifier_2 , "knn" , cur_f2 ) mt_feats_to_red_2 [ 0 : mt_feats_to_red . shape [ 0 ] , i ] = mt_feats_to_red [ : , i ] mt_feats_to_red_2 [ mt_feats_to_red . shape [ 0 ] : mt_feats_to_red . shape [ 0 ] + len ( classNames1 ) , i ] = P1 + 0.0001 mt_feats_to_red_2 [ mt_feats_to_red . shape [ 0 ] + len ( classNames1 ) : : , i ] = P2 + 0.0001 mt_feats_to_red = mt_feats_to_red_2 mt_feats_to_red = mt_feats_to_red [ iFeaturesSelect , : ] # mt _ feats _ to _ red + = numpy . random . rand ( mt _ feats _ to _ red . shape [ 0 ] , mt _ feats _ to _ red . shape [ 1 ] ) * 0.0000010 ( mt_feats_to_red , MEAN , STD ) = aT . normalizeFeatures ( [ mt_feats_to_red . T ] ) mt_feats_to_red = mt_feats_to_red [ 0 ] . T # dist _ all = numpy . sum ( distance . squareform ( distance . pdist ( mt _ feats _ to _ red . T ) ) , axis = 0) # m _ dist _ all = numpy . mean ( dist _ all ) # iNonOutLiers2 = numpy . nonzero ( dist _ all < 3.0 * m _ dist _ all ) [ 0] # mt _ feats _ to _ red = mt _ feats _ to _ red [ : , iNonOutLiers2] Labels = numpy . zeros ( ( mt_feats_to_red . shape [ 1 ] , ) ) ; LDAstep = 1.0 LDAstepRatio = LDAstep / st_win # print LDAstep , LDAstepRatio for i in range ( Labels . shape [ 0 ] ) : Labels [ i ] = int ( i * st_win / LDAstepRatio ) ; clf = sklearn . discriminant_analysis . LinearDiscriminantAnalysis ( n_components = lda_dim ) clf . fit ( mt_feats_to_red . T , Labels ) mt_feats_norm = ( clf . transform ( mt_feats_norm . T ) ) . T if n_speakers <= 0 : s_range = range ( 2 , 10 ) else : s_range = [ n_speakers ] clsAll = [ ] sil_all = [ ] centersAll = [ ] for iSpeakers in s_range : k_means = sklearn . cluster . KMeans ( n_clusters = iSpeakers ) k_means . fit ( mt_feats_norm . T ) cls = k_means . labels_ means = k_means . cluster_centers_ # Y = distance . squareform ( distance . pdist ( mt _ feats _ norm . T ) ) clsAll . append ( cls ) centersAll . append ( means ) sil_1 = [ ] ; sil_2 = [ ] for c in range ( iSpeakers ) : # for each speaker ( i . e . for each extracted cluster ) clust_per_cent = numpy . nonzero ( cls == c ) [ 0 ] . shape [ 0 ] / float ( len ( cls ) ) if clust_per_cent < 0.020 : sil_1 . append ( 0.0 ) sil_2 . append ( 0.0 ) else : # get subset of feature vectors mt_feats_norm_temp = mt_feats_norm [ : , cls == c ] # compute average distance between samples # that belong to the cluster ( a values ) Yt = distance . pdist ( mt_feats_norm_temp . T ) sil_1 . append ( numpy . mean ( Yt ) * clust_per_cent ) silBs = [ ] for c2 in range ( iSpeakers ) : # compute distances from samples of other clusters if c2 != c : clust_per_cent_2 = numpy . nonzero ( cls == c2 ) [ 0 ] . shape [ 0 ] / float ( len ( cls ) ) MidTermFeaturesNormTemp2 = mt_feats_norm [ : , cls == c2 ] Yt = distance . cdist ( mt_feats_norm_temp . T , MidTermFeaturesNormTemp2 . T ) silBs . append ( numpy . mean ( Yt ) * ( clust_per_cent + clust_per_cent_2 ) / 2.0 ) silBs = numpy . array ( silBs ) # . . . and keep the minimum value ( i . e . # the distance from the " nearest " cluster ) sil_2 . append ( min ( silBs ) ) sil_1 = numpy . array ( sil_1 ) ; sil_2 = numpy . 
array ( sil_2 ) ; sil = [ ] for c in range ( iSpeakers ) : # for each cluster ( speaker ) compute silhouette sil . append ( ( sil_2 [ c ] - sil_1 [ c ] ) / ( max ( sil_2 [ c ] , sil_1 [ c ] ) + 0.00001 ) ) # keep the AVERAGE SILLOUETTE sil_all . append ( numpy . mean ( sil ) ) imax = numpy . argmax ( sil_all ) # optimal number of clusters nSpeakersFinal = s_range [ imax ] # generate the final set of cluster labels # ( important : need to retrieve the outlier windows : # this is achieved by giving them the value of their # nearest non - outlier window ) cls = numpy . zeros ( ( n_wins , ) ) for i in range ( n_wins ) : j = numpy . argmin ( numpy . abs ( i - i_non_outliers ) ) cls [ i ] = clsAll [ imax ] [ j ] # Post - process method 1 : hmm smoothing for i in range ( 1 ) : # hmm training start_prob , transmat , means , cov = trainHMM_computeStatistics ( mt_feats_norm_or , cls ) hmm = hmmlearn . hmm . GaussianHMM ( start_prob . shape [ 0 ] , "diag" ) hmm . startprob_ = start_prob hmm . transmat_ = transmat hmm . means_ = means ; hmm . covars_ = cov cls = hmm . predict ( mt_feats_norm_or . T ) # Post - process method 2 : median filtering : cls = scipy . signal . medfilt ( cls , 13 ) cls = scipy . signal . medfilt ( cls , 11 ) sil = sil_all [ imax ] class_names = [ "speaker{0:d}" . format ( c ) for c in range ( nSpeakersFinal ) ] ; # load ground - truth if available gt_file = filename . replace ( '.wav' , '.segments' ) # if groundturh exists if os . path . isfile ( gt_file ) : [ seg_start , seg_end , seg_labs ] = readSegmentGT ( gt_file ) flags_gt , class_names_gt = segs2flags ( seg_start , seg_end , seg_labs , mt_step ) if plot_res : fig = plt . figure ( ) if n_speakers > 0 : ax1 = fig . add_subplot ( 111 ) else : ax1 = fig . add_subplot ( 211 ) ax1 . set_yticks ( numpy . array ( range ( len ( class_names ) ) ) ) ax1 . axis ( ( 0 , duration , - 1 , len ( class_names ) ) ) ax1 . set_yticklabels ( class_names ) ax1 . plot ( numpy . array ( range ( len ( cls ) ) ) * mt_step + mt_step / 2.0 , cls ) if os . path . isfile ( gt_file ) : if plot_res : ax1 . plot ( numpy . array ( range ( len ( flags_gt ) ) ) * mt_step + mt_step / 2.0 , flags_gt , 'r' ) purity_cluster_m , purity_speaker_m = evaluateSpeakerDiarization ( cls , flags_gt ) print ( "{0:.1f}\t{1:.1f}" . format ( 100 * purity_cluster_m , 100 * purity_speaker_m ) ) if plot_res : plt . title ( "Cluster purity: {0:.1f}% - " "Speaker purity: {1:.1f}%" . format ( 100 * purity_cluster_m , 100 * purity_speaker_m ) ) if plot_res : plt . xlabel ( "time (seconds)" ) # print s _ range , sil _ all if n_speakers <= 0 : plt . subplot ( 212 ) plt . plot ( s_range , sil_all ) plt . xlabel ( "number of clusters" ) ; plt . ylabel ( "average clustering's sillouette" ) ; plt . show ( ) return cls
def extract_twin_values(triples, traits, gender=None):
    """Calculate the heritability of certain traits in triplets.

    Parameters
    ----------
    triples : (a, b, "Female/Male") triples. The sample IDs are then used to
        query the traits dictionary.
    traits : sample_id => value dictionary

    Returns
    -------
    Tuples of size 2 that contain paired trait values of the twins.
    """
    # Construct the pairs of trait values
    traitValuesAbsent = 0
    nanValues = 0
    genderSkipped = 0
    twinValues = []
    for a, b, t in triples:
        if gender is not None and t != gender:
            genderSkipped += 1
            continue
        if not (a in traits and b in traits):
            traitValuesAbsent += 1
            continue
        if np.isnan(traits[a]) or np.isnan(traits[b]):
            nanValues += 1
            continue
        twinValues.append((traits[a], traits[b]))

    print("A total of {} pairs extracted ({} absent; {} nan; {} genderSkipped)"
          .format(len(twinValues), traitValuesAbsent, nanValues, genderSkipped))
    return twinValues
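# Tiny made-up demonstration of the pairing logic; the sample IDs and trait
# values below are invented.
import numpy as np

triples = [("tw1a", "tw1b", "Female"),
           ("tw2a", "tw2b", "Male"),
           ("tw3a", "tw3b", "Female")]
traits = {"tw1a": 170.0, "tw1b": 172.5,
          "tw2a": 180.0, "tw2b": np.nan,
          "tw3a": 165.0}            # tw3b missing entirely

pairs = []
for a, b, sex in triples:
    if a in traits and b in traits and not (np.isnan(traits[a]) or np.isnan(traits[b])):
        pairs.append((traits[a], traits[b]))

print(pairs)  # [(170.0, 172.5)] -> one usable twin pair out of three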
def get_blocks_struct(self):
    """Return a dictionary with block ids keyed to (x, y, z)."""
    cur_x = 0
    cur_y = 0
    cur_z = 0
    blocks = {}
    for block_id in self.blocksList:
        blocks[(cur_x, cur_y, cur_z)] = block_id
        cur_y += 1
        if cur_y > 127:
            cur_y = 0
            cur_z += 1
            if cur_z > 15:
                cur_z = 0
                cur_x += 1
    return blocks
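# The loop above walks Y fastest, then Z, then X (a 16x16x128 chunk layout, judging
# from the 127/15 bounds), so the flat index can also be computed directly; a
# self-contained check under that assumed layout.
blocks_list = list(range(16 * 16 * 128))   # stand-in for self.blocksList

def flat_index(x, y, z):
    # Y varies fastest, then Z, then X: index = x*16*128 + z*128 + y
    return x * 16 * 128 + z * 128 + y

blocks = {}
x = y = z = 0
for block_id in blocks_list:
    blocks[(x, y, z)] = block_id
    y += 1
    if y > 127:
        y, z = 0, z + 1
        if z > 15:
            z, x = 0, x + 1

assert blocks[(0, 5, 0)] == flat_index(0, 5, 0) == 5
assert blocks[(1, 0, 2)] == flat_index(1, 0, 2) == 2304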
def _valid_ip(ip_address):
    """Check if the IP address is valid and routable

    Return either True or False
    """
    try:
        address = ipaddress.IPv4Address(ip_address)
    except ipaddress.AddressValueError:
        return False
    if address.is_unspecified or address.is_loopback or address.is_link_local or address.is_multicast or address.is_reserved:
        return False
    return True
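# The stdlib ipaddress module provides every flag used above, so the check can
# be tried directly; the addresses below are arbitrary examples.
import ipaddress

def valid_routable_ipv4(ip_address):
    try:
        address = ipaddress.IPv4Address(ip_address)
    except ipaddress.AddressValueError:
        return False
    return not (address.is_unspecified or address.is_loopback
                or address.is_link_local or address.is_multicast
                or address.is_reserved)

print(valid_routable_ipv4("93.184.216.34"))  # True
print(valid_routable_ipv4("127.0.0.1"))      # False (loopback)
print(valid_routable_ipv4("224.0.0.1"))      # False (multicast)
print(valid_routable_ipv4("not-an-ip"))      # False (not parseable)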
def first_of(obj, *attrs):
    """
    :param obj:
    :param attrs: a list of strings
    :return: the first truthy attribute of obj, calling it as a function if necessary.
    """
    for attr in attrs:
        r = resolve(obj, attr)
        if r:
            return r
def set_keyboard_focus(self, move_up, move_down, select):
    """Set the keyboard as the object that controls the menu.

    move_up is from the pygame.KEYS enum that defines what button causes the
    menu selection to move up.

    move_down is from the pygame.KEYS enum that defines what button causes the
    menu selection to move down.

    select is from the pygame.KEYS enum that defines what button causes the
    button to be selected.
    """
    self.input_focus = StateTypes.KEYBOARD
    self.move_up_button = move_up
    self.move_down_button = move_down
    self.select_button = select
def hide ( self , fromQtmacs : bool = False ) : """Overloaded ` ` hide ( ) ` ` function to avoid calling it accidentally . This method is a ( weak ) security mechanism to prevent programmers from using the ` ` hide ` ` method as originally intended by Qt , ie . to hide a window . However , in Qtmacs , applets can only be made invisible by either killing them with ` ` qteKillApplet ` ` or replacing them with another applet ( see eg . ` ` qteReplaceAppletInLayout ` ` ) . Using the Qt native ` ` hide ( ) ` ` method messes with the Qtmacs layout engine and can lead to unexpected visual results and more serious errors . This method should only be used by ` ` QtmacsMain ` ` to implement the layout but not by any applets or macros , unless it is for a widget not under the control of Qtmacs ( probably a bad idea ) . | Args | * * fromQtmacs ( * * bool * * ) : if * * True * * then the original ` ` hide ( ) ` ` method is called | Returns | * * * None * * | Raises | * * * QtmacsArgumentError * * if at least one argument has an invalid type ."""
if not fromQtmacs : # Log a warning message if someone tries to call the # native hide ( ) method . msg = ( 'hide() command for applet <b>{}</b> ignored. Use ' ' qteNewApplet instead.' . format ( self . qteAppletID ( ) ) ) self . qteLogger . warning ( msg ) else : # If we are explicitly requested to hide ( ) the widget , # then clear the Qtmacs internal visibility flag ( will be # used in the focus manager to double check that Qtmacs # and Qt agree on which widgets are visible ) , remove the # applet from the QtmacsSplitter by re - parenting it to # * * None * * , and tell Qt to actually hide the widget as # soon as the event loop is in control again . self . _qteAdmin . isVisible = False self . qteReparent ( None ) QtGui . QWidget . hide ( self )
def FqdnUrl(v):
    """Verify that the value is a Fully qualified domain name URL.

    >>> s = Schema(FqdnUrl())
    >>> with raises(MultipleInvalid, 'expected a Fully qualified domain name URL'):
    ...   s("http://localhost/")
    >>> s('http://w3.org')
    'http://w3.org'
    """
    try:
        parsed_url = _url_validation(v)
        if "." not in parsed_url.netloc:
            raise UrlInvalid("must have a domain name in URL")
        return v
    except:
        raise ValueError
def _get_violations ( self , query , record ) : """Reverse - engineer the query to figure out why a record was selected . : param query : MongoDB query : type query : MongQuery : param record : Record in question : type record : dict : return : Reasons why bad : rtype : list ( ConstraintViolation )"""
# special case , when no constraints are given if len ( query . all_clauses ) == 0 : return [ NullConstraintViolation ( ) ] # normal case , check all the constraints reasons = [ ] for clause in query . all_clauses : var_name = None key = clause . constraint . field . name op = clause . constraint . op fval = mongo_get ( record , key ) if fval is None : expected = clause . constraint . value reasons . append ( ConstraintViolation ( clause . constraint , 'missing' , expected ) ) continue if op . is_variable ( ) : # retrieve value for variable var_name = clause . constraint . value value = mongo_get ( record , var_name , default = None ) if value is None : reasons . append ( ConstraintViolation ( clause . constraint , 'missing' , var_name ) ) continue clause . constraint . value = value # swap out value , temporarily # take length for size if op . is_size ( ) : if isinstance ( fval , str ) or not hasattr ( fval , '__len__' ) : reasons . append ( ConstraintViolation ( clause . constraint , type ( fval ) , 'sequence' ) ) if op . is_variable ( ) : clause . constraint . value = var_name # put original value back continue fval = len ( fval ) ok , expected = clause . constraint . passes ( fval ) if not ok : reasons . append ( ConstraintViolation ( clause . constraint , fval , expected ) ) if op . is_variable ( ) : clause . constraint . value = var_name # put original value back return reasons
def print_spelling_errors(filename, encoding='utf8'):
    """Print misspelled words returned by sphinxcontrib-spelling"""
    filesize = os.stat(filename).st_size
    if filesize:
        sys.stdout.write('Misspelled Words:\n')
        with io.open(filename, encoding=encoding) as wordlist:
            for line in wordlist:
                sys.stdout.write(' ' + line)
    return 1 if filesize else 0
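# Quick exercise with a scratch wordlist file; the contents are invented and the
# helper is inlined so the snippet runs on its own.
import io
import os
import sys
import tempfile

def print_spelling_errors(filename, encoding='utf8'):
    filesize = os.stat(filename).st_size
    if filesize:
        sys.stdout.write('Misspelled Words:\n')
        with io.open(filename, encoding=encoding) as wordlist:
            for line in wordlist:
                sys.stdout.write(' ' + line)
    return 1 if filesize else 0

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('docuent\nrecieve\n')

print(print_spelling_errors(f.name))  # prints both words, then 1 (non-empty list)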
def view_creatr(filename):
    """Name of the View File to be created"""
    if not check():
        click.echo(Fore.RED + 'ERROR: Ensure you are in a bast app to run the create:view command')
        return

    path = os.path.abspath('.') + '/public/templates'
    if not os.path.exists(path):
        os.makedirs(path)

    filename_ = str(filename + ".html").lower()
    view_file = open(path + "/" + filename_, 'w+')
    view_file.write("")
    view_file.close()
    click.echo(Fore.GREEN + "View file " + filename_ + " created in public/templates folder")
def unsafe(self):
    """True if the mapping is unsafe for an update.

    Applies only to local source. Returns True if the paths for source and
    destination are the same, or if one is a component of the other path.
    """
    (scheme, netloc, path, params, query, frag) = urlparse(self.src_uri)
    if scheme != '':
        return False
    s = os.path.normpath(self.src_uri)
    d = os.path.normpath(self.dst_path)
    lcp = os.path.commonprefix([s, d])
    return (s == lcp or d == lcp)
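# The heart of the check is os.path.commonprefix; a stand-alone illustration
# with made-up local paths.
import os

def overlapping(src, dst):
    s = os.path.normpath(src)
    d = os.path.normpath(dst)
    lcp = os.path.commonprefix([s, d])
    # One normalized path is a prefix of the other (the helper's notion of
    # "same path or one contains the other").
    return s == lcp or d == lcp

print(overlapping("/data/site", "/data/site/archive"))  # True  - dst inside src
print(overlapping("/data/site", "/data/site"))          # True  - same path
print(overlapping("/data/site", "/backup/site"))        # False - disjoint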
def validate(self, obj):
    """Validate convertibility to internal representation

    Returns:
        bool: True if 'obj' matches the data type

    Raises:
        ValidationError: If the validation fails
    """
    if not isinstance(obj, self.internal_type):
        raise ValidationError(obj, self.internal_type)
    return True
def replace_widgets ( self , widgets , team_context , dashboard_id , eTag = None ) : """ReplaceWidgets . [ Preview API ] Replace the widgets on specified dashboard with the supplied widgets . : param [ Widget ] widgets : Revised state of widgets to store for the dashboard . : param : class : ` < TeamContext > < azure . devops . v5_0 . dashboard . models . TeamContext > ` team _ context : The team context for the operation : param str dashboard _ id : ID of the Dashboard to modify . : param String eTag : Dashboard Widgets Version : rtype : : class : ` < WidgetsVersionedList > < azure . devops . v5_0 . dashboard . models . WidgetsVersionedList > `"""
project = None team = None if team_context is not None : if team_context . project_id : project = team_context . project_id else : project = team_context . project if team_context . team_id : team = team_context . team_id else : team = team_context . team route_values = { } if project is not None : route_values [ 'project' ] = self . _serialize . url ( 'project' , project , 'string' ) if team is not None : route_values [ 'team' ] = self . _serialize . url ( 'team' , team , 'string' ) if dashboard_id is not None : route_values [ 'dashboardId' ] = self . _serialize . url ( 'dashboard_id' , dashboard_id , 'str' ) content = self . _serialize . body ( widgets , '[Widget]' ) response = self . _send ( http_method = 'PUT' , location_id = 'bdcff53a-8355-4172-a00a-40497ea23afc' , version = '5.0-preview.2' , route_values = route_values , content = content ) response_object = models . WidgetsVersionedList ( ) response_object . widgets = self . _deserialize ( '[Widget]' , self . _unwrap_collection ( response ) ) response_object . eTag = response . headers . get ( 'ETag' ) return response_object
async def add(self, full, valu):
    """Atomically increments a node's value."""
    node = await self.open(full)
    oldv = node.valu
    newv = oldv + valu
    node.valu = await self.storNodeValu(full, node.valu + valu)
    await node.fire('hive:set', path=full, valu=valu, oldv=oldv)
    return newv
def _read_precursor(precursor, sps):
    """Load precursor file for that species"""
    hairpin = defaultdict(str)
    name = None
    with open(precursor) as in_handle:
        for line in in_handle:
            if line.startswith(">"):
                if hairpin[name]:
                    hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
                name = line.strip().replace(">", " ").split()[0]
            else:
                hairpin[name] += line.strip()
        hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
    return hairpin
def closed_loop_edge_lengths_via_footpoint ( glats , glons , alts , dates , direction , vector_direction , step_size = None , max_steps = None , edge_length = 25. , edge_steps = 5 ) : """Forms closed loop integration along mag field , satrting at input points and goes through footpoint . At footpoint , steps along vector direction in both positive and negative directions , then traces back to opposite footpoint . Back at input location , steps toward those new field lines ( edge _ length ) along vector direction until hitting distance of minimum approach . Loops don ' t always close . Returns total edge distance that goes through input location , along with the distances of closest approach . Note vector direction refers to the magnetic unit vector direction Parameters glats : list - like of floats ( degrees ) Geodetic ( WGS84 ) latitude glons : list - like of floats ( degrees ) Geodetic ( WGS84 ) longitude alts : list - like of floats ( km ) Geodetic ( WGS84 ) altitude , height above surface dates : list - like of datetimes Date and time for determination of scalars direction : string ' north ' or ' south ' for tracing through northern or southern footpoint locations vector _ direction : string ' meridional ' or ' zonal ' unit vector directions step _ size : float ( km ) Step size ( km ) used for field line integration max _ steps : int Number of steps taken for field line integration edge _ length : float ( km ) Half of total edge length ( step ) taken at footpoint location . edge _ length step in both positive and negative directions . edge _ steps : int Number of steps taken from footpoint towards new field line in a given direction ( positive / negative ) along unit vector Returns np . array , np . array , np . array A closed loop field line path through input location and footpoint in northern / southern hemisphere and back is taken . The return edge length through input location is provided . The distances of closest approach for the positive step along vector direction , and the negative step are returned ."""
if step_size is None : step_size = 100. if max_steps is None : max_steps = 1000 steps = np . arange ( max_steps ) if direction == 'south' : direct = - 1 elif direction == 'north' : direct = 1 # use spacecraft location to get ECEF ecef_xs , ecef_ys , ecef_zs = geodetic_to_ecef ( glats , glons , alts ) # prepare output full_local_step = [ ] min_distance_plus = [ ] min_distance_minus = [ ] for ecef_x , ecef_y , ecef_z , glat , glon , alt , date in zip ( ecef_xs , ecef_ys , ecef_zs , glats , glons , alts , dates ) : # going to try and form close loops via field line integration # start at location of interest , map down to northern or southern # footpoints then take symmetric steps along meridional and zonal # directions and trace back from location of interest , step along # field line directions until we intersect or hit the distance of # closest approach to the return field line with the known # distances of footpoint steps , and the closet approach distance # we can determine the scalar mapping of one location to another yr , doy = pysat . utils . getyrdoy ( date ) double_date = float ( yr ) + float ( doy ) / 366. # print ( glat , glon , alt ) # trace to footpoint , starting with input location sc_root = np . array ( [ ecef_x , ecef_y , ecef_z ] ) trace = field_line_trace ( sc_root , double_date , direct , 120. , steps = steps , step_size = step_size , max_steps = max_steps ) # pull out footpoint location ftpnt = trace [ - 1 , : ] ft_glat , ft_glon , ft_alt = ecef_to_geodetic ( * ftpnt ) # take step from footpoint along + vector direction plus_step = step_along_mag_unit_vector ( ftpnt [ 0 ] , ftpnt [ 1 ] , ftpnt [ 2 ] , date , direction = vector_direction , num_steps = edge_steps , step_size = edge_length / edge_steps ) # trace this back to other footpoint other_plus = field_line_trace ( plus_step , double_date , - direct , 0. , steps = steps , step_size = step_size , max_steps = max_steps ) # take half step from first footpoint along - vector direction minus_step = step_along_mag_unit_vector ( ftpnt [ 0 ] , ftpnt [ 1 ] , ftpnt [ 2 ] , date , direction = vector_direction , scalar = - 1 , num_steps = edge_steps , step_size = edge_length / edge_steps ) # trace this back to other footpoint other_minus = field_line_trace ( minus_step , double_date , - direct , 0. , steps = steps , step_size = step_size , max_steps = max_steps ) # need to determine where the intersection of field line coming back from # footpoint through postive vector direction step and back # in relation to the vector direction from the s / c location . pos_edge_length , _ , mind_pos = step_until_intersect ( sc_root , other_plus , 1 , date , direction = vector_direction , field_step_size = step_size , step_size_goal = edge_length / edge_steps ) # take half step from S / C along - vector direction minus_edge_length , _ , mind_minus = step_until_intersect ( sc_root , other_minus , - 1 , date , direction = vector_direction , field_step_size = step_size , step_size_goal = edge_length / edge_steps ) # collect outputs full_local_step . append ( pos_edge_length + minus_edge_length ) min_distance_plus . append ( mind_pos ) min_distance_minus . append ( mind_minus ) return np . array ( full_local_step ) , np . array ( min_distance_plus ) , np . array ( min_distance_minus )
def list_domains ( ) : '''Return a list of virtual machine names on the minion CLI Example : . . code - block : : bash salt ' * ' virt . list _ domains'''
data = __salt__ [ 'vmadm.list' ] ( keyed = True ) vms = [ "UUID TYPE RAM STATE ALIAS" ] for vm in data : vms . append ( "{vmuuid}{vmtype}{vmram}{vmstate}{vmalias}" . format ( vmuuid = vm . ljust ( 38 ) , vmtype = data [ vm ] [ 'type' ] . ljust ( 6 ) , vmram = data [ vm ] [ 'ram' ] . ljust ( 9 ) , vmstate = data [ vm ] [ 'state' ] . ljust ( 18 ) , vmalias = data [ vm ] [ 'alias' ] , ) ) return vms
def _n_parameters ( self ) : """Return the number of free parameters in the model ."""
ndim = self . means_ . shape [ 1 ] if self . covariance_type == 'full' : cov_params = self . n_components * ndim * ( ndim + 1 ) / 2. elif self . covariance_type == 'diag' : cov_params = self . n_components * ndim elif self . covariance_type == 'tied' : cov_params = ndim * ( ndim + 1 ) / 2. elif self . covariance_type == 'spherical' : cov_params = self . n_components mean_params = ndim * self . n_components return int ( cov_params + mean_params + self . n_components - 1 )
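As a quick sanity check (illustrative numbers only, not from the source), the free-parameter count for a full-covariance mixture with 3 components in 2 dimensions works out as:
    # n_components = 3, ndim = 2, covariance_type = 'full'
    # cov_params  = 3 * 2 * (2 + 1) / 2 = 9   (one symmetric 2x2 covariance per component)
    # mean_params = 2 * 3               = 6
    # weights     = 3 - 1               = 2   (mixture weights sum to one)
    # total free parameters             = 17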
def rpc_get_historic_names_by_address ( self , address , offset , count , ** con_info ) : """Get the list of names owned by an address throughout history Return { ' status ' : True , ' names ' : [ { ' name ' : . . . , ' block _ id ' : . . . , ' vtxindex ' : . . . } ] } on success Return { ' error ' : . . . } on error"""
if not check_address ( address ) : return { 'error' : 'Invalid address' , 'http_status' : 400 } if not check_offset ( offset ) : return { 'error' : 'invalid offset' , 'http_status' : 400 } if not check_count ( count , 10 ) : return { 'error' : 'invalid count' , 'http_status' : 400 } db = get_db_state ( self . working_dir ) names = db . get_historic_names_by_address ( address , offset , count ) db . close ( ) if names is None : names = [ ] return self . success_response ( { 'names' : names } )
def update_spec ( self ) : """Update the source specification with information from the row intuiter , but only if the spec values are not already set ."""
if self . datafile . exists : with self . datafile . reader as r : self . header_lines = r . info [ 'header_rows' ] self . comment_lines = r . info [ 'comment_rows' ] self . start_line = r . info [ 'data_start_row' ] self . end_line = r . info [ 'data_end_row' ]
def retrieve_token ( self , token ) : """Retrieve Token details for a specific Token . Args : token : The identifier of the token . Returns : The API response for the requested token ."""
headers = self . client . _get_private_headers ( ) endpoint = '/tokens/{}' . format ( token ) return self . client . _get ( self . client . URL_BASE + endpoint , headers = headers )
def get_abs ( msrc , mrec , srcazm , srcdip , recazm , recdip , verb ) : r"""Get required ab ' s for given angles . This check - function is called from one of the modelling routines in : mod : ` model ` . Consult these modelling routines for a detailed description of the input parameters . Parameters msrc , mrec : bool True if src / rec is magnetic , else False . srcazm , recazm : float Horizontal source / receiver angle ( azimuth ) . srcdip , recdip : float Vertical source / receiver angle ( dip ) . verb : { 0 , 1 , 2 , 3 , 4} Level of verbosity . Returns ab _ calc : array of int ab ' s to calculate for this bipole ."""
# Get required ab ' s ( 9 at most ) ab_calc = np . array ( [ [ 11 , 12 , 13 ] , [ 21 , 22 , 23 ] , [ 31 , 32 , 33 ] ] ) if msrc : ab_calc += 3 if mrec : ab_calc += 30 # Switch < ab > using reciprocity . if msrc : # G ^ mm _ ab ( s , r , e , z ) = - G ^ ee _ ab ( s , r , - z , - e ) ab_calc -= 33 # -30 : mrec - > erec ; - 3 : msrc - > esrc else : # G ^ me _ ab ( s , r , e , z ) = - G ^ em _ ba ( r , s , e , z ) ab_calc = ab_calc % 10 * 10 + ab_calc // 10 # Swap alpha / beta # Remove unnecessary ab ' s bab = np . asarray ( ab_calc * 0 + 1 , dtype = bool ) # Remove if source is x - or y - directed check = np . atleast_1d ( srcazm ) [ 0 ] if np . allclose ( srcazm % ( np . pi / 2 ) , 0 ) : # if all angles are multiples of 90 if np . isclose ( check // ( np . pi / 2 ) % 2 , 0 ) : # Multiples of pi ( 180) bab [ : , 1 ] *= False # x - directed source , remove y else : # Multiples of pi / 2 ( 90) bab [ : , 0 ] *= False # y - directed source , remove x # Remove if source is vertical check = np . atleast_1d ( srcdip ) [ 0 ] if np . allclose ( srcdip % ( np . pi / 2 ) , 0 ) : # if all angles are multiples of 90 if np . isclose ( check // ( np . pi / 2 ) % 2 , 0 ) : # Multiples of pi ( 180) bab [ : , 2 ] *= False # Horizontal , remove z else : # Multiples of pi / 2 ( 90) bab [ : , : 2 ] *= False # Vertical , remove x / y # Remove if receiver is x - or y - directed check = np . atleast_1d ( recazm ) [ 0 ] if np . allclose ( recazm % ( np . pi / 2 ) , 0 ) : # if all angles are multiples of 90 if np . isclose ( check // ( np . pi / 2 ) % 2 , 0 ) : # Multiples of pi ( 180) bab [ 1 , : ] *= False # x - directed receiver , remove y else : # Multiples of pi / 2 ( 90) bab [ 0 , : ] *= False # y - directed receiver , remove x # Remove if receiver is vertical check = np . atleast_1d ( recdip ) [ 0 ] if np . allclose ( recdip % ( np . pi / 2 ) , 0 ) : # if all angles are multiples of 90 if np . isclose ( check // ( np . pi / 2 ) % 2 , 0 ) : # Multiples of pi ( 180) bab [ 2 , : ] *= False # Horizontal , remove z else : # Multiples of pi / 2 ( 90) bab [ : 2 , : ] *= False # Vertical , remove x / y # Reduce ab_calc = ab_calc [ bab ] . ravel ( ) # Print actual calculated < ab > if verb > 2 : print ( " Required ab's : " , _strvar ( ab_calc ) ) return ab_calc
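As a hand-traced example (not from the source; angles in radians): a horizontal, x-directed electric source paired with a horizontal, x-directed electric receiver reduces the nine candidates to a single component:
    # electric src/rec (msrc = mrec = False), azimuth 0, dip 0, quiet output
    get_abs(False, False, 0., 0., 0., 0., 0)
    # -> array([11]); the y- and z-contributions of both source and receiver are
    #    masked out, so only the Gxx component remains to be computed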
def fetch ( self , is_dl_forced = True ) : """Fetches data from udp collaboration server , see top level comments for class for more information : return :"""
username = config . get_config ( ) [ 'dbauth' ] [ 'udp' ] [ 'user' ] password = config . get_config ( ) [ 'dbauth' ] [ 'udp' ] [ 'password' ] credentials = ( username , password ) # Get patient map file : patient_id_map = self . open_and_parse_yaml ( self . map_files [ 'patient_ids' ] ) udp_internal_ids = patient_id_map . keys ( ) phenotype_fields = [ 'Patient' , 'HPID' , 'Present' ] # Get phenotype ids for each patient phenotype_params = { 'method' : 'search_subjects' , 'subject_type' : 'Phenotype' , 'search_mode' : 'DEEP' , 'fields' : 'Patient' , 'conditions' : 'equals' , 'values' : ',' . join ( udp_internal_ids ) , 'user_fields' : ',' . join ( phenotype_fields ) } prioritized_variants = [ 'Patient' , 'Gene' , 'Chromosome Position' , 'Variant Allele' , 'Transcript' ] prioritized_params = { 'method' : 'search_subjects' , 'subject_type' : 'Variant Prioritization' , 'search_mode' : 'DEEP' , 'fields' : 'Patient' , 'conditions' : 'equals' , 'values' : ',' . join ( udp_internal_ids ) , 'user_fields' : ',' . join ( prioritized_variants ) , 'format' : 'json' } variant_fields = [ 'Patient' , 'Family' , 'Chr' , 'Build' , 'Chromosome Position' , 'Reference Allele' , 'Variant Allele' , 'Parent of origin' , 'Allele Type' , 'Mutation Type' , 'Gene' , 'Transcript' , 'Original Amino Acid' , 'Variant Amino Acid' , 'Amino Acid Change' , 'Segregates with' , 'Position' , 'Exon' , 'Inheritance model' , 'Zygosity' , 'dbSNP ID' , '1K Frequency' , 'Number of Alleles' ] variant_params = { 'method' : 'search_subjects' , 'subject_type' : 'Exome Analysis Results' , 'search_mode' : 'DEEP' , 'fields' : 'Patient' , 'conditions' : 'equals' , 'user_fields' : ',' . join ( variant_fields ) , 'format' : 'json' } pheno_file = open ( '/' . join ( ( self . rawdir , self . files [ 'patient_phenotypes' ] [ 'file' ] ) ) , 'w' ) variant_file = open ( '/' . join ( ( self . rawdir , self . files [ 'patient_variants' ] [ 'file' ] ) ) , 'w' ) pheno_file . write ( '{0}\n' . format ( '\t' . join ( phenotype_fields ) ) ) variant_file . write ( '{0}\n' . format ( '\t' . join ( variant_fields ) ) ) variant_gene = self . _fetch_data_from_udp ( udp_internal_ids , prioritized_params , prioritized_variants , credentials ) variant_gene_map = dict ( ) for line in variant_gene : variant_gene_map . setdefault ( line [ 0 ] , [ ] ) . append ( # Try to make a unique value based on gene - pos - variantAlele - transcript # TODO make this a dict for readability purposes "{0}-{1}-{2}-{3}" . format ( line [ 1 ] , line [ 2 ] , line [ 3 ] , line [ 4 ] ) ) variant_info = self . _fetch_data_from_udp ( udp_internal_ids , variant_params , variant_fields , credentials ) for line in variant_info : variant = "{0}-{1}-{2}-{3}" . format ( line [ 10 ] , line [ 4 ] , line [ 6 ] , line [ 11 ] ) if variant in variant_gene_map [ line [ 0 ] ] : line [ 0 ] = patient_id_map [ line [ 0 ] ] line [ 4 ] = re . sub ( r'\.0$' , '' , line [ 4 ] ) variant_file . write ( '{0}\n' . format ( '\t' . join ( line ) ) ) phenotype_info = self . _fetch_data_from_udp ( udp_internal_ids , phenotype_params , phenotype_fields , credentials ) for line in phenotype_info : line [ 0 ] = patient_id_map [ line [ 0 ] ] pheno_file . write ( '{0}\n' . format ( '\t' . join ( line ) ) ) variant_file . close ( ) pheno_file . close ( ) return
def load_devices ( self ) : """load stored devices from the local file"""
self . _devices = [ ] if os . path . exists ( self . _devices_filename ) : log . debug ( "loading devices from '{}'..." . format ( self . _devices_filename ) ) with codecs . open ( self . _devices_filename , "rb" , "utf-8" ) as f : self . _devices = json . load ( f ) return self . _devices
def run_netsh_command ( netsh_args ) : """Execute a netsh command and return its exit code ."""
devnull = open ( os . devnull , 'w' ) command_raw = 'netsh interface ipv4 ' + netsh_args return int ( subprocess . call ( command_raw , stdout = devnull ) )
def _set_up_savefolder ( self ) : """Create catalogs for different file output to clean up savefolder . Non - public method Parameters None Returns None"""
if self . savefolder is None : return self . cells_path = os . path . join ( self . savefolder , 'cells' ) if RANK == 0 : if not os . path . isdir ( self . cells_path ) : os . mkdir ( self . cells_path ) self . figures_path = os . path . join ( self . savefolder , 'figures' ) if RANK == 0 : if not os . path . isdir ( self . figures_path ) : os . mkdir ( self . figures_path ) self . populations_path = os . path . join ( self . savefolder , 'populations' ) if RANK == 0 : if not os . path . isdir ( self . populations_path ) : os . mkdir ( self . populations_path ) COMM . Barrier ( )
def get_1D_overlap ( eclusters , depth = 1 ) : """Find blocks that are 1D overlapping , returns cliques of block ids that are in conflict"""
overlap_set = set ( ) active = set ( ) ends = [ ] for i , ( chr , left , right ) in enumerate ( eclusters ) : ends . append ( ( chr , left , 0 , i ) ) # 0/1 for left / right - ness ends . append ( ( chr , right , 1 , i ) ) ends . sort ( ) chr_last = "" for chr , pos , left_right , i in ends : if chr != chr_last : active . clear ( ) if left_right == 0 : active . add ( i ) else : active . remove ( i ) if len ( active ) > depth : overlap_set . add ( tuple ( sorted ( active ) ) ) chr_last = chr return overlap_set
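A small hand-worked example (hypothetical clusters, default depth = 1):
    # block 0 spans (chr1, 1, 5), block 1 spans (chr1, 3, 8), block 2 spans (chr1, 10, 12)
    get_1D_overlap([("chr1", 1, 5), ("chr1", 3, 8), ("chr1", 10, 12)])
    # -> {(0, 1)}: blocks 0 and 1 overlap on chr1, block 2 is disjoint, and the sweep
    #    over sorted interval endpoints never holds more than two active blocks at once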
def handle_hooks ( stage , hooks , provider , context , dump , outline ) : """Handle pre / post hooks . Args : stage ( str ) : The name of the hook stage - pre _ build / post _ build . hooks ( list ) : A list of dictionaries containing the hooks to execute . provider ( : class : ` stacker . provider . base . BaseProvider ` ) : The provider the current stack is using . context ( : class : ` stacker . context . Context ` ) : The current stacker context . dump ( bool ) : Whether running with dump set or not . outline ( bool ) : Whether running with outline set or not ."""
if not outline and not dump and hooks : utils . handle_hooks ( stage = stage , hooks = hooks , provider = provider , context = context )
def download_image ( self , image_type , image ) : """Read file of a project and download it : param image _ type : Image type : param image : The path of the image : returns : A file stream"""
url = self . _getUrl ( "/{}/images/{}" . format ( image_type , image ) ) response = yield from self . _session ( ) . request ( "GET" , url , auth = self . _auth ) if response . status == 404 : raise aiohttp . web . HTTPNotFound ( text = "{} not found on compute" . format ( image ) ) return response
def get_all_clients ( self , params = None ) : """Get all clients This will iterate over all pages until it gets all elements . So if the rate limit exceeded it will throw an Exception and you will get nothing : param params : search params : return : list"""
return self . _iterate_through_pages ( get_function = self . get_clients_per_page , resource = CLIENTS , ** { 'params' : params } )
def set_deployment_run_id ( self ) : """Sets the deployment run ID from deployment properties : return : None"""
log = logging . getLogger ( self . cls_logger + '.set_deployment_run_id' ) deployment_run_id_val = self . get_value ( 'cons3rt.deploymentRun.id' ) if not deployment_run_id_val : log . debug ( 'Deployment run ID not found in deployment properties' ) return try : deployment_run_id = int ( deployment_run_id_val ) except ValueError : log . debug ( 'Deployment run ID found was unable to convert to an int: {d}' . format ( d = deployment_run_id_val ) ) return self . deployment_run_id = deployment_run_id log . info ( 'Found deployment run ID: {i}' . format ( i = str ( self . deployment_run_id ) ) )
def is_shown ( self , request ) : """If there aren ' t any visible items in the submenu , don ' t bother to show this menu item"""
for menuitem in self . menu . _registered_menu_items : if menuitem . is_shown ( request ) : return True return False
def scaled_dimensions ( self , width = None , height = None ) : """Return a ( cx , cy ) 2 - tuple representing the native dimensions of this image scaled by applying the following rules to * width * and * height * . If both * width * and * height * are specified , the return value is ( * width * , * height * ) ; no scaling is performed . If only one is specified , it is used to compute a scaling factor that is then applied to the unspecified dimension , preserving the aspect ratio of the image . If both * width * and * height * are | None | , the native dimensions are returned . The native dimensions are calculated using the dots - per - inch ( dpi ) value embedded in the image , defaulting to 72 dpi if no value is specified , as is often the case . The returned values are both | Length | objects ."""
if width is None and height is None : return self . width , self . height if width is None : scaling_factor = float ( height ) / float ( self . height ) width = round ( self . width * scaling_factor ) if height is None : scaling_factor = float ( width ) / float ( self . width ) height = round ( self . height * scaling_factor ) return Emu ( width ) , Emu ( height )
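For instance (illustrative numbers only, treating the native size as 400 x 300 length units):
    # scaled_dimensions(width=200) computes scaling_factor = 200 / 400 = 0.5,
    # derives height = round(300 * 0.5) = 150, and returns (Emu(200), Emu(150)),
    # preserving the native 4:3 aspect ratio; with neither argument given it
    # simply returns the native (width, height) pair unchanged.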
def latmio_dir ( R , itr , D = None , seed = None ) : '''This function " latticizes " a directed network , while preserving the in - and out - degree distributions . In weighted networks , the function preserves the out - strength but not the in - strength distributions . Parameters R : NxN np . ndarray directed binary / weighted connection matrix itr : int rewiring parameter . Each edge is rewired approximately itr times . D : np . ndarray | None distance - to - diagonal matrix . Defaults to the actual distance matrix if not specified . seed : hashable , optional If None ( default ) , use the np . random ' s global random state to generate random numbers . Otherwise , use a new np . random . RandomState instance seeded with the given value . Returns Rlatt : NxN np . ndarray latticized network in original node ordering Rrp : NxN np . ndarray latticized network in node ordering used for latticization ind _ rp : Nx1 np . ndarray node ordering used for latticization eff : int number of actual rewirings carried out'''
rng = get_rng ( seed ) n = len ( R ) ind_rp = rng . permutation ( n ) # randomly reorder matrix R = R . copy ( ) R = R [ np . ix_ ( ind_rp , ind_rp ) ] # create distance to diagonal matrix if not specified by user if D is None : D = np . zeros ( ( n , n ) ) un = np . mod ( range ( 1 , n ) , n ) um = np . mod ( range ( n - 1 , 0 , - 1 ) , n ) u = np . append ( ( 0 , ) , np . where ( un < um , un , um ) ) for v in range ( int ( np . ceil ( n / 2 ) ) ) : D [ n - v - 1 , : ] = np . append ( u [ v + 1 : ] , u [ : v + 1 ] ) D [ v , : ] = D [ n - v - 1 , : ] [ : : - 1 ] i , j = np . where ( R ) k = len ( i ) itr *= k # maximal number of rewiring attempts per iteration max_attempts = np . round ( n * k / ( n * ( n - 1 ) ) ) # actual number of successful rewirings eff = 0 for it in range ( itr ) : att = 0 while att <= max_attempts : # while not rewired while True : e1 = rng . randint ( k ) e2 = rng . randint ( k ) while e1 == e2 : e2 = rng . randint ( k ) a = i [ e1 ] b = j [ e1 ] c = i [ e2 ] d = j [ e2 ] if a != c and a != d and b != c and b != d : break # rewiring condition if not ( R [ a , d ] or R [ c , b ] ) : # lattice condition if ( D [ a , b ] * R [ a , b ] + D [ c , d ] * R [ c , d ] >= D [ a , d ] * R [ a , b ] + D [ c , b ] * R [ c , d ] ) : R [ a , d ] = R [ a , b ] R [ a , b ] = 0 R [ c , b ] = R [ c , d ] R [ c , d ] = 0 j . setflags ( write = True ) j [ e1 ] = d j [ e2 ] = b # reassign edge indices eff += 1 break att += 1 Rlatt = R [ np . ix_ ( ind_rp [ : : - 1 ] , ind_rp [ : : - 1 ] ) ] # reverse random permutation return Rlatt , R , ind_rp , eff
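A minimal usage sketch (hypothetical input, bctpy-style conventions assumed):
    import numpy as np
    rng = np.random.RandomState(0)
    R = (rng.rand(20, 20) < 0.2).astype(float)   # sparse directed binary network
    np.fill_diagonal(R, 0)                       # no self-connections
    Rlatt, Rrp, ind_rp, eff = latmio_dir(R, itr=5, seed=42)
    # Rlatt preserves the in- and out-degree sequences of R while pulling edges
    # toward the diagonal of the permuted ordering; eff counts successful rewirings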
def get_precursor_mz ( exact_mass , precursor_type ) : """Calculate precursor mz based on exact mass and precursor type Args : exact _ mass ( float ) : exact mass of compound of interest precursor _ type ( str ) : Precursor type ( currently only works with ' [ M - H ] - ' , ' [ M + H ] + ' and ' [ M + H - H2O ] + ' ) Return : precursor m / z of the compound , or False for an unsupported precursor type"""
# these are just taken from what was present in the massbank . msp file for those missing the exact mass d = { '[M-H]-' : - 1.007276 , '[M+H]+' : 1.007276 , '[M+H-H2O]+' : 1.007276 - ( ( 1.007276 * 2 ) + 15.9949 ) } try : return exact_mass + d [ precursor_type ] except KeyError as e : print ( e ) return False
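Worked example (glucose, monoisotopic mass used purely for illustration):
    get_precursor_mz(180.06339, '[M+H]+')   # 180.06339 + 1.007276 ≈ 181.0707
    get_precursor_mz(180.06339, '[M-H]-')   # 180.06339 - 1.007276 ≈ 179.0561
    get_precursor_mz(180.06339, '[M+Na]+')  # unsupported adduct: prints the KeyError and returns False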
def make_module ( self , vars = None , shared = False , locals = None ) : """This method works like the : attr : ` module ` attribute when called without arguments but it will evaluate the template on every call rather than caching it . It ' s also possible to provide a dict which is then used as context . The arguments are the same as for the : meth : ` new _ context ` method ."""
return TemplateModule ( self , self . new_context ( vars , shared , locals ) )
def get_hour_dirs ( root = None ) : """Gets the directories under selfplay _ dir that match YYYY - MM - DD - HH ."""
root = root or selfplay_dir ( ) return list ( filter ( lambda s : re . match ( r"\d{4}-\d{2}-\d{2}-\d{2}" , s ) , gfile . ListDirectory ( root ) ) )
def _add_redundancy_router_interfaces ( self , context , router , itfc_info , new_port , redundancy_router_ids = None , ha_settings_db = None , create_ha_group = True ) : """To be called in add _ router _ interface ( ) AFTER interface has been added to router in DB ."""
# There are essentially three cases where we add interface to a # redundancy router : # 1 . HA is enabled on a user visible router that has one or more # interfaces . # 2 . Redundancy level is increased so one or more redundancy routers # are added . # 3 . An interface is added to a user visible router . # For 1 : An HA GROUP MUST BE CREATED and EXTRA PORTS MUST BE CREATED # for each redundancy router . The id of extra port should be # specified in the interface _ info argument of the # add _ router _ interface call so that we ADD BY PORT . # For 2 : HA group need NOT be created as it will already exist ( since # there is already at least on redundancy router ) . EXTRA PORTS # MUST BE CREATED for each added redundancy router . The id # of extra port should be specified in the interface _ info # argument of the add _ router _ interface call so that we ADD BY # PORT . # For 3 : if the interface for the user _ visible _ router was added by . . . # a ) PORT : An HA GROUP MUST BE CREATED and and EXTRA PORTS MUST BE # CREATED for each redundancy router . The id of extra port # should be specified in the interface _ info argument of # the add _ router _ interface call so that we ADD BY PORT . # b ) SUBNET : There are two cases to consider . If the added interface # of the user _ visible _ router has . . . # b1 ) 1 SUBNET : An HA GROUP MUST BE CREATED and and EXTRA # PORTS MUST BE CREATED for each redundancy # router . The id of extra port should be # specified in the interface _ info argument of # the add _ router _ interface call so we ADD BY # PORT . # b2 ) > 1 SUBNETS : HA group need NOT be created as it will # already exist ( since the redundancy routers # should already have extra ports to which the # ( IPv6 ) subnet is added . Extra ports need # thus NOT be created . The subnet id should be # added to the existing extra ports . router_id = router [ 'id' ] if ha_settings_db is None : ha_settings_db = self . _get_ha_settings_by_router_id ( context , router_id ) if ha_settings_db is None : return e_context = context . elevated ( ) add_by_subnet = ( itfc_info is not None and 'subnet_id' in itfc_info and len ( new_port [ 'fixed_ips' ] ) > 1 ) if ( add_by_subnet is False or ( itfc_info is None and create_ha_group is True ) ) : # generate ha settings and extra port for router ( VIP ) port self . _create_ha_group ( e_context , router , new_port , ha_settings_db ) fixed_ips = self . _get_fixed_ips_subnets ( new_port [ 'fixed_ips' ] ) for r_id in ( redundancy_router_ids or self . _get_redundancy_router_ids ( e_context , router_id ) ) : if add_by_subnet is True : # need to add subnet to redundancy router port ports = self . _core_plugin . get_ports ( e_context , filters = { 'device_id' : [ r_id ] , 'network_id' : [ new_port [ 'network_id' ] ] } , fields = [ 'fixed_ips' , 'id' ] ) redundancy_port = ports [ 0 ] fixed_ips = redundancy_port [ 'fixed_ips' ] fixed_ip = { 'subnet_id' : itfc_info [ 'subnet_id' ] } fixed_ips . append ( fixed_ip ) self . _core_plugin . update_port ( e_context , redundancy_port [ 'id' ] , { 'port' : { 'fixed_ips' : fixed_ips } } ) else : redundancy_port = self . _create_hidden_port ( e_context , new_port [ 'network_id' ] , '' , fixed_ips ) interface_info = { 'port_id' : redundancy_port [ 'id' ] } self . add_router_interface ( e_context , r_id , interface_info )
def async_send ( self , url , data , headers , success_cb , failure_cb ) : """Spawn an async request to a remote webserver ."""
# this can be optimized by making a custom self . send that does not # read the response since we don ' t use it . self . _lock . acquire ( ) return gevent . spawn ( super ( GeventedHTTPTransport , self ) . send , url , data , headers ) . link ( lambda x : self . _done ( x , success_cb , failure_cb ) )
def _CompileProtos ( ) : """Compiles all Fleetspeak protos ."""
proto_files = [ ] for dir_path , _ , filenames in os . walk ( THIS_DIRECTORY ) : for filename in filenames : if filename . endswith ( ".proto" ) : proto_files . append ( os . path . join ( dir_path , filename ) ) if not proto_files : return protoc_command = [ "python" , "-m" , "grpc_tools.protoc" , "--python_out" , THIS_DIRECTORY , "--grpc_python_out" , THIS_DIRECTORY , "--proto_path" , THIS_DIRECTORY , ] protoc_command . extend ( proto_files ) subprocess . check_output ( protoc_command )
def loglike ( self , y , f , n ) : r"""Binomial log likelihood . Parameters y : ndarray array of 0 , 1 valued integers of targets f : ndarray latent function from the GLM prior ( : math : ` \ mathbf { f } = \ boldsymbol \ Phi \ mathbf { w } ` ) n : ndarray the total number of observations Returns logp : ndarray the log likelihood of each y given each f under this likelihood ."""
ll = binom . logpmf ( y , n = n , p = expit ( f ) ) return ll
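In closed form this is the standard binomial log-pmf evaluated at p = sigmoid(f); a quick numeric check with illustrative values:
    # log p(y | f, n) = log C(n, y) + y*log(expit(f)) + (n - y)*log(1 - expit(f))
    # e.g. y = 3, n = 10, f = 0.0 so expit(f) = 0.5:
    #   binom.logpmf(3, n=10, p=0.5) = log(120) - 10*log(2) ≈ -2.144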
def dataframe ( self ) : """Returns a pandas DataFrame containing all other class properties and values . The index for the DataFrame is the string abbreviation of the team , such as ' DET ' ."""
fields_to_include = { 'abbreviation' : self . abbreviation , 'average_age' : self . average_age , 'games_played' : self . games_played , 'goals_against' : self . goals_against , 'goals_for' : self . goals_for , 'losses' : self . losses , 'name' : self . name , 'overtime_losses' : self . overtime_losses , 'pdo_at_even_strength' : self . pdo_at_even_strength , 'penalty_killing_percentage' : self . penalty_killing_percentage , 'points' : self . points , 'points_percentage' : self . points_percentage , 'power_play_goals' : self . power_play_goals , 'power_play_goals_against' : self . power_play_goals_against , 'power_play_opportunities' : self . power_play_opportunities , 'power_play_opportunities_against' : self . power_play_opportunities_against , 'power_play_percentage' : self . power_play_percentage , 'rank' : self . rank , 'save_percentage' : self . save_percentage , 'shooting_percentage' : self . shooting_percentage , 'short_handed_goals' : self . short_handed_goals , 'short_handed_goals_against' : self . short_handed_goals_against , 'shots_against' : self . shots_against , 'shots_on_goal' : self . shots_on_goal , 'simple_rating_system' : self . simple_rating_system , 'strength_of_schedule' : self . strength_of_schedule , 'total_goals_per_game' : self . total_goals_per_game , 'wins' : self . wins } return pd . DataFrame ( [ fields_to_include ] , index = [ self . _abbreviation ] )
def blog_authors ( * args ) : """Put a list of authors ( users ) for blog posts into the template context ."""
blog_posts = BlogPost . objects . published ( ) authors = User . objects . filter ( blogposts__in = blog_posts ) return list ( authors . annotate ( post_count = Count ( "blogposts" ) ) )
def _resize_with_dtype ( arr , dtype ) : """This function will transform arr into an array with the same type as dtype . It will do this by filling new columns with zeros ( or NaNs , if it is a float column ) . Also , columns that are not in the new dtype will be dropped ."""
structured_arrays = dtype . names is not None and arr . dtype . names is not None old_columns = arr . dtype . names or [ ] new_columns = dtype . names or [ ] # In numpy 1.9 the ndarray . astype method used to handle changes in number of fields . The code below # should replicate the same behaviour the old astype used to have . # One may be tempted to use np . lib . recfunctions . stack _ arrays to implement both this step and the # concatenate that follows but it 2x slower and it requires providing your own default values ( instead # of np . zeros ) . # Numpy 1.14 supports doing new _ arr [ old _ columns ] = arr [ old _ columns ] , which is faster than the code below # ( in benchmarks it seems to be even slightly faster than using the old astype ) . However , that is not # supported by numpy 1.9.2. if structured_arrays and ( old_columns != new_columns ) : old_columns = set ( old_columns ) new_columns = set ( new_columns ) new_arr = np . zeros ( arr . shape , dtype ) for c in old_columns & new_columns : new_arr [ c ] = arr [ c ] # missing float columns should default to nan rather than zero _is_float_type = lambda _dtype : _dtype . type in ( np . float32 , np . float64 ) _is_void_float_type = lambda _dtype : _dtype . type == np . void and _is_float_type ( _dtype . subdtype [ 0 ] ) _is_float_or_void_float_type = lambda _dtype : _is_float_type ( _dtype ) or _is_void_float_type ( _dtype ) _is_float = lambda column : _is_float_or_void_float_type ( dtype . fields [ column ] [ 0 ] ) for new_column in filter ( _is_float , new_columns - old_columns ) : new_arr [ new_column ] = np . nan return new_arr . astype ( dtype ) else : return arr . astype ( dtype )
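A small illustrative sketch (hypothetical dtypes, not from the source):
    import numpy as np
    arr = np.array([(1, 2.5), (3, 4.5)], dtype=[('a', 'i8'), ('b', 'f8')])
    new_dtype = np.dtype([('a', 'i8'), ('c', 'f8')])
    out = _resize_with_dtype(arr, new_dtype)
    # column 'b' is dropped, 'a' is copied over, and the new float column 'c'
    # defaults to NaN: out['a'] -> [1, 3], out['c'] -> [nan, nan]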
def _deps_only_toggled ( self , widget , data = None ) : """Function deactivate options in case of deps _ only and opposite"""
active = widget . get_active ( ) self . dir_name . set_sensitive ( not active ) self . entry_project_name . set_sensitive ( not active ) self . dir_name_browse_btn . set_sensitive ( not active ) self . run_btn . set_sensitive ( active or not self . project_name_shown or self . entry_project_name . get_text ( ) != "" )
def _get_xref ( self , line ) : """Given a line , return the optional xref attribute value with internal spaces removed ."""
# Ex : Wikipedia : Zygotene # Ex : Reactome : REACT _ 22295 " Addition of a third mannose to . . . " mtch = self . attr2cmp [ 'xref' ] . match ( line ) return mtch . group ( 1 ) . replace ( ' ' , '' )
def show_formats ( ) : """Print a list of all the file formats that are supported for writing . The file formats are determined by their extensions . Returns None"""
fmts = { "ann" : "Kvis annotation" , "reg" : "DS9 regions file" , "fits" : "FITS Binary Table" , "csv" : "Comma separated values" , "tab" : "Tab separated values" , "tex" : "LaTeX table format" , "html" : "HTML table" , "vot" : "VO-Table" , "xml" : "VO-Table" , "db" : "Sqlite3 database" , "sqlite" : "Sqlite3 database" } supported = get_table_formats ( ) print ( "Extension | Description | Supported?" ) for k in sorted ( fmts . keys ( ) ) : print ( "{0:10s} {1:24s} {2}" . format ( k , fmts [ k ] , k in supported ) ) return
def md5 ( text ) : """Returns the md5 hash of a string ."""
h = hashlib . md5 ( ) h . update ( _unicode ( text ) . encode ( "utf-8" ) ) return h . hexdigest ( )
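Usage example (assuming the module-level _unicode helper passes ordinary strings through unchanged):
    md5("hello")   # -> '5d41402abc4b2a76b9719d911017c592'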
def format_exception ( cls , instance , trcback , context = 1 ) : """| Formats given exception . | The code produces a similar output to : func : ` traceback . format _ exception ` except that it allows frames to be excluded from the stack if the given stack trace frame tag is found in the frame locals and set * * True * * . : param cls : Exception class . : type cls : object : param instance : Exception instance . : type instance : object : param trcback : Traceback . : type trcback : Traceback : param context : Context being included . : type context : int : return : Formatted exception . : rtype : list"""
stack = extract_stack ( get_inner_most_frame ( trcback ) , context = context ) output = [ ] output . append ( "Traceback (most recent call last):" ) for frame , file_name , line_number , name , context , index in stack : output . append ( " File \"{0}\", line {1}, in {2}" . format ( file_name , line_number , name ) ) for line in context : output . append ( " {0}" . format ( line . strip ( ) ) ) for line in traceback . format_exception_only ( cls , instance ) : output . append ( "{0}" . format ( line ) ) return output
def set_screen_layout ( self , screen_layout_mode , guest_screen_info ) : """Set video modes for the guest screens . in screen _ layout _ mode of type : class : ` ScreenLayoutMode ` in guest _ screen _ info of type : class : ` IGuestScreenInfo `"""
if not isinstance ( screen_layout_mode , ScreenLayoutMode ) : raise TypeError ( "screen_layout_mode can only be an instance of type ScreenLayoutMode" ) if not isinstance ( guest_screen_info , list ) : raise TypeError ( "guest_screen_info can only be an instance of type list" ) for a in guest_screen_info [ : 10 ] : if not isinstance ( a , IGuestScreenInfo ) : raise TypeError ( "array can only contain objects of type IGuestScreenInfo" ) self . _call ( "setScreenLayout" , in_p = [ screen_layout_mode , guest_screen_info ] )
def _ParsePathSpecification ( self , knowledge_base , searcher , file_system , path_specification , path_separator ) : """Parses a file system for a preprocessing attribute . Args : knowledge _ base ( KnowledgeBase ) : to fill with preprocessing information . searcher ( dfvfs . FileSystemSearcher ) : file system searcher to preprocess the file system . file _ system ( dfvfs . FileSystem ) : file system to be preprocessed . path _ specification ( dfvfs . PathSpec ) : path specification that contains the artifact value data . path _ separator ( str ) : path segment separator . Raises : PreProcessFail : if the preprocessing fails ."""
try : file_entry = searcher . GetFileEntryByPathSpec ( path_specification ) except IOError as exception : relative_path = searcher . GetRelativePath ( path_specification ) if path_separator != file_system . PATH_SEPARATOR : relative_path_segments = file_system . SplitPath ( relative_path ) relative_path = '{0:s}{1:s}' . format ( path_separator , path_separator . join ( relative_path_segments ) ) raise errors . PreProcessFail ( ( 'Unable to retrieve file entry: {0:s} with error: ' '{1!s}' ) . format ( relative_path , exception ) ) if file_entry : self . _ParseFileEntry ( knowledge_base , file_entry )
def are_same_file_types ( objs ) : """Are given ( maybe ) file objs same type ( extension ) ? : param objs : A list of file path or file ( - like ) objects > > > are _ same _ file _ types ( [ ] ) False > > > are _ same _ file _ types ( [ " a . conf " ] ) True > > > are _ same _ file _ types ( [ " a . conf " , " b . conf " ] ) True > > > are _ same _ file _ types ( [ " a . yml " , " b . yml " ] ) True > > > are _ same _ file _ types ( [ " a . yml " , " b . json " ] ) False > > > strm = anyconfig . compat . StringIO ( ) > > > are _ same _ file _ types ( [ " a . yml " , " b . yml " , strm ] ) False"""
if not objs : return False ext = _try_to_get_extension ( objs [ 0 ] ) if ext is None : return False return all ( _try_to_get_extension ( p ) == ext for p in objs [ 1 : ] )
def dtool ( debug ) : """Tool to work with datasets ."""
level = logging . WARNING if debug : level = logging . DEBUG logging . basicConfig ( format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' , level = level )
def unsubscribe ( self , future ) : """Terminates the subscription given by a future Args : future ( Future ) : The future of the original subscription"""
assert future not in self . _pending_unsubscribes , "%r has already been unsubscribed from" % self . _pending_unsubscribes [ future ] subscribe = self . _requests [ future ] self . _pending_unsubscribes [ future ] = subscribe # Clear out the subscription self . _subscriptions . pop ( subscribe . id ) request = Unsubscribe ( subscribe . id ) request . set_callback ( self . _q . put ) try : controller = self . get_controller ( subscribe . path [ 0 ] ) except ValueError : # Controller has already gone , probably during tearDown pass else : self . handle_request ( controller , request )
def export_svgs ( obj , filename = None , height = None , width = None , webdriver = None , timeout = 5 ) : '''Export the SVG - enabled plots within a layout . Each plot will result in a distinct SVG file . If the filename is not given , it is derived from the script name ( e . g . ` ` / foo / myplot . py ` ` will create ` ` / foo / myplot . svg ` ` ) Args : obj ( LayoutDOM object ) : a Layout ( Row / Column ) , Plot or Widget object to display filename ( str , optional ) : filename to save document under ( default : None ) If None , infer from the filename . height ( int ) : the desired height of the exported layout obj only if it ' s a Plot instance . Otherwise the height kwarg is ignored . width ( int ) : the desired width of the exported layout obj only if it ' s a Plot instance . Otherwise the width kwarg is ignored . webdriver ( selenium . webdriver ) : a selenium webdriver instance to use to export the image . timeout ( int ) : the maximum amount of time ( in seconds ) to wait for Bokeh to initialize ( default : 5 ) ( Added in 1.1.1 ) . Returns : filenames ( list ( str ) ) : the list of filenames where the SVGs files are saved . . . warning : : Responsive sizing _ modes may generate layouts with unexpected size and aspect ratios . It is recommended to use the default ` ` fixed ` ` sizing mode .'''
svgs = get_svgs ( obj , height = height , width = width , driver = webdriver , timeout = timeout ) if len ( svgs ) == 0 : log . warning ( "No SVG Plots were found." ) return if filename is None : filename = default_filename ( "svg" ) filenames = [ ] for i , svg in enumerate ( svgs ) : if i == 0 : fname = filename else : idx = filename . find ( ".svg" ) fname = filename [ : idx ] + "_{}" . format ( i ) + filename [ idx : ] with io . open ( fname , mode = "w" , encoding = "utf-8" ) as f : f . write ( svg ) filenames . append ( fname ) return filenames
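Minimal usage sketch (assuming a Bokeh environment with a suitable webdriver available):
    from bokeh.plotting import figure
    p = figure(output_backend="svg")       # SVG backend is required for export
    p.line([1, 2, 3], [4, 6, 5])
    export_svgs(p, filename="plot.svg")    # -> ["plot.svg"]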