signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _get_distance_term(self, C, rjb, mag):
    """Return the general distance scaling term (equation 2)."""
    # Effective distance including the pseudo-depth term "h".
    r_eff = np.sqrt(rjb ** 2. + C["h"] ** 2.)
    r_ref = self.CONSTS["Rref"]
    # Magnitude-dependent geometric-spreading slope.
    slope = C["c1"] + C["c2"] * (mag - self.CONSTS["Mref"])
    # Anelastic attenuation coefficient from the coefficient table.
    anelastic = self._get_anelastic_coeff(C)
    return slope * np.log(r_eff / r_ref) + anelastic * (r_eff - r_ref)
def _get_station_codes(self, force=False):
    """Gets and caches a list of station codes, optionally within a bbox.

    Will return the cached version if it exists unless ``force`` is True.

    :param force: when True, ignore the cache and re-fetch everything.
    :return: list of station code strings (also stored on
        ``self.station_codes``).
    """
    # Serve from cache unless the caller explicitly forces a refresh.
    if not force and self.station_codes is not None:
        return self.station_codes
    state_urls = self._get_state_urls()
    # filter by bounding box against a shapefile
    state_matches = None
    if self.bbox:
        # Coarse pre-filter: intersect the bbox with state/province polygons
        # so we only fetch station lists for states that can contain hits.
        with collection(
            os.path.join(
                "resources",
                "ne_50m_admin_1_states_provinces_lakes_shp.shp",
            ),
            "r",
        ) as c:
            geom_matches = [x["properties"] for x in c.filter(bbox=self.bbox)]
            # Canadian provinces are collapsed to the single code "CN";
            # US states keep their postal abbreviation.
            state_matches = [
                x["postal"] if x["admin"] != "Canada" else "CN"
                for x in geom_matches
            ]
    self.station_codes = []
    for state_url in state_urls:
        if state_matches is not None:
            # URL ends in "<STATE>.<ext>"; extract the state abbreviation.
            state_abbr = state_url.split("/")[-1].split(".")[0]
            if state_abbr not in state_matches:
                continue
        self.station_codes.extend(self._get_stations_for_state(state_url))
    if self.bbox:
        # retrieve metadata for all stations to properly filter them
        # (the state-level filter above is only approximate).
        metadata = self._get_metadata(self.station_codes)
        parsed_metadata = self.parser._parse_metadata(metadata)

        def in_bbox(code):
            # bbox layout assumed (min_lon, min_lat, max_lon, max_lat)
            # — TODO confirm against the caller that sets self.bbox.
            lat = parsed_metadata[code]["latitude"]
            lon = parsed_metadata[code]["longitude"]
            return (
                lon >= self.bbox[0]
                and lon <= self.bbox[2]
                and lat >= self.bbox[1]
                and lat <= self.bbox[3]
            )

        self.station_codes = list(filter(in_bbox, self.station_codes))
    return self.station_codes
def parse_value(v, parser, config, description):
    """Convert a string received on the command-line into a value or None.

    :param str v: The value to parse.
    :param parser: The fallback callable to load the value if loading
        from scheme fails.
    :param dict config: The config to use (must provide 'required').
    :param str description: Description (for debugging).
    :return: The parsed value, or None for empty/None input.
    :rtype: object
    """
    val = None
    # Empty string is treated as "no value": return None without parsing.
    if v == '':
        return
    if v is not None:
        try:
            # First attempt: interpret the string via the schema loader.
            val = load_value_from_schema(v)
        except Exception as e:
            # Re-raise with the original exception chained as the cause.
            # NOTE(review): the format string only uses {kind}; the
            # description kwarg is passed but ignored — confirm intended.
            six.raise_from(
                CertifierTypeError(
                    message='{kind}'.format(
                        description=description,
                        kind=type(v).__name__,
                    ),
                    required=config['required'],
                    value=v,
                ),
                e,
            )
        else:
            # Schema loading succeeded but produced nothing: fall back to
            # the provided parser callable.
            if val is None:
                try:
                    return parser(v)
                except CertifierTypeError:
                    raise
                except CertifierValueError:
                    raise
                except TypeError as e:
                    # Wrap plain TypeErrors in the certifier equivalent.
                    six.raise_from(
                        CertifierTypeError(
                            message='{kind}'.format(
                                description=description,
                                kind=type(v).__name__,
                            ),
                            required=config['required'],
                            value=v,
                        ),
                        e,
                    )
                except ValueError as e:
                    # Wrap plain ValueErrors in the certifier equivalent.
                    six.raise_from(
                        CertifierValueError(
                            message='{value}'.format(
                                description=description,
                                value=v,
                            ),
                            required=config['required'],
                            value=v,
                        ),
                        e,
                    )
    return val
def releases(self):
    """The releases for this app."""
    # Delegate to the shared resource fetcher on the API handle.
    resource_path = ('apps', self.name, 'releases')
    return self._h._get_resources(resource=resource_path, obj=Release, app=self)
async def get_first_search_result(self, term: str):
    """Get first search result.

    This function will parse the information from the link that
    search_novel_updates returns and then return it as a dictionary.

    :param term: The novel to search for and parse.
    :return: dict with the parsed series fields (title, cover, type,
        genre, tags, language, authors, artists, year, novel_status,
        licensed, completely_translated, publisher, english_publisher,
        description, aliases, link, related_series).
    :raises aiohttp.ClientResponseError: on a non-200 response.
    """
    # Uses the other method in the class
    # to search the search page for the actual page that we want
    to_parse = await self.get_search_page(term)
    async with self.session.get(to_parse) as response:
        # If the response is OK
        if response.status == 200:
            # The information to parse
            parse_info = BeautifulSoup(await response.text(), 'lxml')
            # Artists,
            # defined up here so we can account for if it is None,
            # e.g. for web novels etc.
            artists = parse_info.find('a', class_='genre', id='artiststag')
            # English publisher,
            # defined here so we can account for it if None,
            # e.g. for works unlicensed in English
            english_publisher = parse_info.find('a', class_='genre', id='myepub')
            # Publisher,
            # defined here so we can account for it if it's None,
            # e.g. not published
            publisher = parse_info.find('a', class_='genre', id='myopub')
            # Accounting for if Artists/English Publisher/Publisher is None
            if artists is not None:
                artists = artists.string
            if english_publisher is not None:
                try:
                    # NOTE(review): .children is an iterator; accessing
                    # .string on it looks like it would always raise
                    # AttributeError, making the except branch the real
                    # path — confirm.
                    english_publisher = english_publisher.children.string
                except AttributeError:
                    # english publisher's children tag is not string.
                    english_publisher = list(english_publisher.children)
                    if len(english_publisher) == 1:
                        english_publisher = english_publisher[0]
            if publisher is not None:
                publisher = publisher.string
            # The data to return to the user, in a dictionary
            no_img_found_url = 'http://www.novelupdates.com/img/noimagefound.jpg'
            data = {
                'title': self._get_title(parse_info=parse_info),
                # The placeholder "no image found" cover maps to None.
                'cover': (None
                          if parse_info.find('img').get('src') == no_img_found_url
                          else parse_info.find('img').get('src')),
                'type': parse_info.find('a', class_='genre type').string,
                'genre': ([x.string for x in
                           list(parse_info.find_all('div', id='seriesgenre')[0].children)
                           if len(x.string.strip()) > 0]),
                'tags': ([x.string for x in
                          list(parse_info.find_all('div', id='showtags')[0].children)
                          if len(x.string.strip()) > 0]),
                'language': parse_info.find('a', class_='genre lang').string,
                # set() removes duplicate author links before listing.
                'authors': list(set([x.string for x in
                                     parse_info.find_all('a', id='authtag')])),
                'artists': artists,
                'year': parse_info.find('div', id='edityear').string.strip(),
                'novel_status': self._get_novel_status(parse_info=parse_info),
                'licensed': (True if parse_info.find('div', id='showlicensed')
                             .string.strip() == 'Yes' else False),
                # More than one descendant node implies a translation note
                # is present, i.e. fully translated.
                'completely_translated': (True if len(list(
                    parse_info.find('div', id='showtranslated').descendants)) > 1
                    else False),
                'publisher': publisher,
                'english_publisher': english_publisher,
                'description': (' '.join(
                    [x.string.strip() for x in
                     list(parse_info.find('div', id='editdescription').children)
                     if x.string.strip()])),
                'aliases': self._get_aliases(parse_info=parse_info),
                'link': to_parse,
                'related_series': self._get_related_series(parse_info=parse_info)
            }
            # Returning the dictionary with all of the information
            # from novelupdates that we parsed
            return data
        else:
            # Raise an error with the response status
            raise aiohttp.ClientResponseError(response.status)
def get_matcher(self, reqt):
    """Get a version matcher for a requirement.

    :param reqt: The requirement
    :type reqt: str
    :return: A version matcher (an instance of
        :class:`distlib.version.Matcher`).
    """
    try:
        return self.scheme.matcher(reqt)
    except UnsupportedVersionError:  # pragma: no cover
        # XXX compat-mode if cannot read the version: retry with just the
        # bare project name (first whitespace-separated token).
        return self.scheme.matcher(reqt.split()[0])
def start_parallel(self):
    """Spawn one Worker per configured thread to consume queued tasks."""
    remaining = self.num_threads
    while remaining > 0:
        # Workers attach themselves to the shared queues on construction.
        Worker(self.tasks_queue, self.results_queue, self.exceptions_queue)
        remaining -= 1
def claim_keys ( self , key_request , timeout = None ) : """Claims one - time keys for use in pre - key messages . Args : key _ request ( dict ) : The keys to be claimed . Format should be < user _ id > : { < device _ id > : < algorithm > } . timeout ( int ) : Optional . The time ( in milliseconds ) to wait when downloading keys from remote servers ."""
content = { "one_time_keys" : key_request } if timeout : content [ "timeout" ] = timeout return self . _send ( "POST" , "/keys/claim" , content = content )
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.

    Compiles a trivial C++ program with ``flagname`` appended; a
    CompileError means the flag is unsupported. stdout/stderr are
    silenced so compiler noise does not leak into the build output.
    """
    with TemporaryDirectory() as tmpdir, stdchannel_redirected(
            sys.stderr, os.devnull), stdchannel_redirected(
            sys.stdout, os.devnull):
        # Create the probe source inside the private temp dir instead of
        # using tempfile.mktemp(), which is deprecated and race-prone.
        fname = os.path.join(tmpdir, 'flag_probe.cpp')
        with open(fname, 'w') as fh:
            fh.write('int main (int argc, char **argv) { return 0; }')
        try:
            compiler.compile([fname], extra_postargs=[flagname],
                             output_dir=tmpdir)
        except setuptools.distutils.errors.CompileError:
            return False
    return True
def createReader(clazz, readername):
    """Static method to create a reader from a reader clazz.

    @param clazz: the reader class name
    @param readername: the reader name
    """
    factories = ReaderFactory.factories
    # Lazily instantiate and cache one Factory per reader class.
    if clazz not in factories:
        factories[clazz] = get_class(clazz).Factory()
    return factories[clazz].create(readername)
def get_ci(theta_star, blockratio=1.0):
    """Get the bootstrap 95% confidence interval.

    Parameters
    ----------
    theta_star : array-like
        Bootstrap replicates of the statistic; NaNs are ignored.
    blockratio : float, optional
        Kept for backward compatibility; previously it only scaled a
        standard-error value that was computed and never used, so it has
        no effect on the returned interval.

    Returns
    -------
    list
        ``[lower, upper]`` empirical 2.5% / 97.5% percentiles.
    """
    # get rid of nans while we sort
    b_star = np.sort(theta_star[~np.isnan(theta_star)])
    # bootstrap 95% CI based on empirical percentiles
    # (dead standard-error computation removed; it was never returned)
    ci = [b_star[int(len(b_star) * .025)],
          b_star[int(len(b_star) * .975)]]
    return ci
def __find_new(self, hueobjecttype):
    '''Starts a search for new Hue objects'''
    # Only these resource types support the "find new" scan.
    assert hueobjecttype in ('lights', 'sensors'), \
        'Unsupported object type {}'.format(hueobjecttype)
    target = '{}/{}'.format(self.API, hueobjecttype)
    return self._request(method='POST', url=target)
def step ( self , t , x_im1 , v_im1_2 , dt ) : """Step forward the positions and velocities by the given timestep . Parameters dt : numeric The timestep to move forward ."""
x_i = x_im1 + v_im1_2 * dt F_i = self . F ( t , np . vstack ( ( x_i , v_im1_2 ) ) , * self . _func_args ) a_i = F_i [ self . ndim : ] v_i = v_im1_2 + a_i * dt / 2 v_ip1_2 = v_i + a_i * dt / 2 return x_i , v_i , v_ip1_2
def add_graph_copy(self, graph, tags=None):
    """Adds a copy of Graph with the specified set of tags.

    :param graph: the tf.Graph to serialize into this module's proto.
    :param tags: optional set of tags attached to the exported MetaGraph.
    """
    with graph.as_default():
        # Remove default attrs so that Modules created by a tensorflow version
        # with ops that have new attrs that are left to their default values can
        # still be loaded by older versions unaware of those attributes.
        meta_graph = tf_v1.train.export_meta_graph(strip_default_attrs=True)
        _export_tags(meta_graph, tags)
        _export_signatures(meta_graph)
        _export_module_attachments(meta_graph)
    self._proto.meta_graphs.extend([meta_graph])
def _validate_templates(self, templates):
    """Validate the template descriptors produced by the injector.

    :param templates: list of template dicts (or None).
    :return: the validated ``templates`` value (possibly None).
    :raises TypeError: when ``templates`` is not a list, or an item is
        not a dict.
    :raises KeyError: when an item is missing a required field.
    :raises Exception: when the injector reported error notifications.
    """
    if templates is None:
        return templates
    if not isinstance(templates, list):
        # Log and raise with an actual message: the previous code passed
        # logger.error()'s return value (None) as the exception argument.
        msg = "templates should be a list."
        logger.error(msg)
        raise TypeError(msg)
    required_keys = ('user', 'name', 'group', 'chmod', 'config_path',
                     'path', 'checksum')
    for template in templates:
        if not isinstance(template, dict):
            msg = "each item to be injected must be a dict."
            logger.error(msg)
            raise TypeError(msg)
        if template.get('notifications'):
            # Any 'errors' notification aborts the whole validation.
            for level, notification in six.iteritems(template.get('notifications')):
                if level == 'errors':
                    logger.error(
                        "errors were returned during the injection process. errors: {0}".format(notification),
                        extra={"container": 'injector'})
                    raise Exception(notification)
        for key in required_keys:
            if key not in template:
                msg = "The injector didn't return a {0}.".format(key)
                logger.error(msg)
                raise KeyError(msg)
    return templates
def _get(self, api_call, params=None, method='GET', auth=False, file_=None):
    """Prepare and dispatch an API call.

    Parameters:
        api_call (str): API function to be called.
        params (str): API function parameters.
        method (str): (Default: GET) HTTP method (GET, POST, PUT or DELETE).
        auth (bool): Force authentication for this call.
        file_ (file): File to upload (only uploads).

    Raise:
        PybooruError: When 'username' or 'api_key' are not set.
    """
    url = "{0}/{1}".format(self.site_url, api_call)
    # GET requests pass query params; everything else sends form data.
    if method == 'GET':
        request_args = {'params': params}
    else:
        request_args = {'data': params, 'files': file_}

    # Adds auth. Also adds auth if username and api_key are specified.
    # Members+ have less restrictions.
    have_creds = self.username and self.api_key
    if auth or have_creds:
        if not have_creds:
            raise PybooruError("'username' and 'api_key' attribute of "
                               "Danbooru are required.")
        request_args['auth'] = (self.username, self.api_key)

    # Do call
    return self._request(url, api_call, request_args, method)
def parse_wyckoff_csv(wyckoff_file):
    """Parse Wyckoff.csv

    There are 530 data sets. For one example:

    9:C 1 2 1::::::
    ::4:c:1:(x,y,z):(-x,y,-z)::
    ::2:b:2:(0,y,1/2)::::
    ::2:a:2:(0,y,0)::::

    :param wyckoff_file: iterable of colon-separated lines, terminated
        by an 'end of data' line.
    :return: list of 530 dicts with keys 'symbol' and 'wyckoff', where
        'wyckoff' is a list of {letter, multiplicity, site_symmetry,
        positions} dicts.
    """
    rowdata = []
    points = []
    # Hall-number entries whose trigonal symbol must use the H (hexagonal)
    # rather than R (rhombohedral) setting.
    hP_nums = [433, 436, 444, 450, 452, 458, 460]
    for i, line in enumerate(wyckoff_file):
        if line.strip() == 'end of data':
            break
        rowdata.append(line.strip().split(':'))
        # 2:P -1::::::   <-- store line number if first element is number
        if rowdata[-1][0].isdigit():
            points.append(i)
    # Sentinel: close the final data set at the 'end of data' line index.
    points.append(i)

    wyckoff = []
    for i in range(len(points) - 1):  # 0 to 529
        symbol = rowdata[points[i]][1]  # e.g. "C 1 2 1"
        if i + 1 in hP_nums:
            # Switch rhombohedral 'R' symbol to hexagonal 'H' setting.
            symbol = symbol.replace('R', 'H', 1)
        wyckoff.append({'symbol': symbol.strip()})

    # When the number of positions is larger than 4,
    # the positions are written in the next line.
    # So those positions are connected.
    for i in range(len(points) - 1):
        count = 0
        wyckoff[i]['wyckoff'] = []
        for j in range(points[i] + 1, points[i + 1]):
            # Hook if the third element is a number (multiplicity), e.g.,
            # 232:P 2/b 2/m 2/b::::::                       <- ignored
            # ::8:r:1:(x,y,z):(-x,y,-z):(x,-y+1/2,-z):(-x,-y+1/2,z)
            # :::::(-x,-y,-z):(x,-y,z):(-x,y+1/2,z):(x,y+1/2,-z)  <- continuation
            # ::4:q:..m:(x,0,z):(-x,0,-z):(x,1/2,-z):(-x,1/2,z)
            # ::4:p:..2:(0,y,1/2):(0,-y+1/2,1/2):(0,-y,1/2):(0,y+1/2,1/2)
            # ::4:o:..2:(1/2,y,0):(1/2,-y+1/2,0):(1/2,-y,0):(1/2,y+1/2,0)
            if rowdata[j][2].isdigit():
                # New Wyckoff position entry.
                pos = []
                w = {'letter': rowdata[j][3].strip(),
                     'multiplicity': int(rowdata[j][2]),
                     'site_symmetry': rowdata[j][4].strip(),
                     'positions': pos}
                wyckoff[i]['wyckoff'].append(w)
                for k in range(4):
                    if rowdata[j][k + 5]:  # check if '(x,y,z)' or ''
                        count += 1
                        pos.append(rowdata[j][k + 5])
            else:
                # Continuation line: append remaining positions to the
                # most recently created entry ('pos' still bound).
                for k in range(4):
                    if rowdata[j][k + 5]:
                        count += 1
                        pos.append(rowdata[j][k + 5])

        # assertion: multiplicity must equal number of coordinate triplets
        # times the lattice centring factor for the symbol's first letter.
        for w in wyckoff[i]['wyckoff']:
            n_pos = len(w['positions'])
            n_pos *= len(lattice_symbols[wyckoff[i]['symbol'][0]])
            assert n_pos == w['multiplicity']
    return wyckoff
def edit(parent, profile):
    """Edits the given profile.

    :param parent | <QWidget>
           profile | <projexui.widgets.xviewwidget.XViewProfile>

    :return <bool>
    """
    dialog = XViewProfileDialog(parent)
    dialog.setProfile(profile)
    # exec_() is truthy when the user accepted the dialog.
    return True if dialog.exec_() else False
def get_pair(self):
    """Get the F2L pair (corner, edge)."""
    # The pair is identified by its two side colours plus the down-face
    # colour for the corner.
    first_colour = self.cube[self.pair[0]].colour
    second_colour = self.cube[self.pair[1]].colour
    bottom_colour = self.cube["D"].colour
    candidates = self.cube.children.copy()
    for colour in (first_colour, second_colour):
        candidates &= self.cube.has_colour(colour)
    # Edge carries only the two side colours; corner also has the bottom.
    edge_set = candidates & self.cube.select_type("edge")
    candidates &= self.cube.has_colour(bottom_colour)
    return (list(candidates)[0], list(edge_set)[0])
def pick_best_coinc(cls, coinc_results):
    """Choose the best two-ifo coinc by ifar first, then statistic if needed.

    This function picks which of the available double-ifo coincs to use.
    It chooses the best (highest) ifar. The ranking statistic is used as
    a tie-breaker. A trials factor is applied if multiple types of coincs
    are possible at this time given the active ifos.

    Parameters
    ----------
    coinc_results: list of coinc result dicts
        Dictionary by detector pair of coinc result dicts.

    Returns
    -------
    best: coinc results dict
        If there is a coinc, this will contain the 'best' one. Otherwise
        it will return the provided dict.
    """
    best = None
    best_ifar = 0
    best_stat = 0
    # Record the trials factor from the possible coincs we could
    # maximize over.
    n_possible = 0
    for candidate in coinc_results:
        # Check that a coinc was possible. See the 'add_singles' method
        # to see where this flag was added into the results dict.
        if 'coinc_possible' in candidate:
            n_possible += 1
        # Check that a coinc exists.
        if 'foreground/ifar' not in candidate:
            continue
        rank = (candidate['foreground/ifar'], candidate['foreground/stat'])
        # Lexicographic comparison: ifar first, statistic as tie-breaker.
        if rank > (best_ifar, best_stat):
            best_ifar, best_stat = rank
            best = candidate
    if best:
        # Apply trials factor for the best coinc.
        best['foreground/ifar'] = best_ifar / float(n_possible)
        logging.info('Found %s coinc with ifar %s',
                     best['foreground/type'], best['foreground/ifar'])
        return best
    # If no coinc, just return one of the results dictionaries. They will
    # all contain the same results (i.e. single triggers) in this case.
    return coinc_results[0]
def _paddr(ins):
    """Returns code sequence which points to local variable or parameter (HL)

    Emits Z80 assembly that computes the address of an IX-relative local
    variable/parameter into HL and pushes the result on the stack. A
    leading '*' on the operand means one level of indirection (load the
    16-bit value stored at that address instead of the address itself).
    """
    output = []
    oper = ins.quad[1]
    # '*' prefix marks an indirect access (pointer dereference).
    indirect = (oper[0] == '*')
    if indirect:
        oper = oper[1:]
    I = int(oper)
    if I >= 0:
        # Positive offsets are parameters: skip the saved return address
        # and the pushed IX frame pointer (2 bytes each).
        I += 4  # Return Address + "push IX"
    # HL <- IX + I  (address of the local/parameter)
    output.append('push ix')
    output.append('pop hl')
    output.append('ld de, %i' % I)
    output.append('add hl, de')
    if indirect:
        # HL <- word stored at (HL): little-endian load via E then H.
        output.append('ld e, (hl)')
        output.append('inc hl')
        output.append('ld h, (hl)')
        output.append('ld l, e')
    # Leave the resulting address (or dereferenced word) on the stack.
    output.append('push hl')
    return output
def _add_exac(self, variant_obj, info_dict):
    """Add the ExAC frequency to the variant.

    Args:
        variant_obj (puzzle.models.Variant): variant to annotate.
        info_dict (dict): INFO-field dictionary from the VCF record.
    """
    frequency = None
    # Scan the known INFO keys; as in the original logic, a later
    # matching key overwrites an earlier one.
    for candidate in ('ExAC', 'EXAC', 'ExACAF', 'EXACAF'):
        raw = info_dict.get(candidate)
        if raw:
            frequency = float(raw)
    # If not found in vcf, search the transcripts.
    if not frequency:
        for transcript in variant_obj.transcripts:
            maf = transcript.ExAC_MAF
            if maf:
                # Value is the last ':'-separated field.
                frequency = float(maf.split(':')[-1])
    if frequency:
        variant_obj.add_frequency('ExAC', frequency)
def _children(self):
    """Yield all direct children of this object."""
    # The condition is only a child when it is a real expression node.
    if isinstance(self.condition, CodeExpression):
        yield self.condition
    # Walk both branches in order: body first, then else-body.
    for branch in (self.body, self.else_body):
        for child in branch._children():
            yield child
def logger(self):
    '''Lazy logger: created on first access and cached afterwards.'''
    cached = self.__logger
    if cached is None:
        cached = logging.getLogger(self.__name)
        self.__logger = cached
    return cached
def receive_verify_post(self, post_params):
    """Returns true if the incoming request is an authenticated verify post.

    :param post_params: dict of POST parameters; must contain 'action',
        'email', 'send_id' and 'sig'.
    :return: True only when the signature matches and the send record's
        email agrees with the posted email.
    """
    # Shape check: must be a dict carrying all required postback fields.
    if isinstance(post_params, dict):
        required_params = ['action', 'email', 'send_id', 'sig']
        if not self.check_for_valid_postback_actions(required_params, post_params):
            return False
    else:
        return False

    if post_params['action'] != 'verify':
        return False

    # Verify the signature over all params except 'sig' itself; copy so
    # the caller's dict is not mutated.
    sig = post_params['sig']
    post_params = post_params.copy()
    del post_params['sig']
    if sig != get_signature_hash(post_params, self.secret):
        return False

    # Cross-check the send record against the posted email.
    send_response = self.get_send(post_params['send_id'])
    try:
        send_body = send_response.get_body()
        send_json = json.loads(send_body)
        # NOTE(review): this substring test runs against the raw JSON
        # body, not the parsed dict — presumably a cheap guard that an
        # 'email' field exists; confirm it was not meant to be
        # "'email' not in send_json".
        if 'email' not in send_body:
            return False
        if send_json['email'] != post_params['email']:
            return False
    except ValueError:
        # Body was not valid JSON.
        return False
    return True
def user_getfield ( self , field , access_token = None ) : """Request a single field of information about the user . : param field : The name of the field requested . : type field : str : returns : The value of the field . Depending on the type , this may be a string , list , dict , or something else . : rtype : object . . versionadded : : 1.0"""
info = self . user_getinfo ( [ field ] , access_token ) return info . get ( field )
def run():
    """CLI main entry point.

    Builds the argparse command tree (upload / download / sync / run /
    scan), parses the command line, constructs local and remote targets,
    runs the chosen synchronizer, and prints a summary.
    """
    # Use print() instead of logging when running in CLI mode:
    set_pyftpsync_logger(None)

    parser = argparse.ArgumentParser(
        description="Synchronize folders over FTP.",
        epilog="See also https://github.com/mar10/pyftpsync",
        parents=[verbose_parser],
    )

    # Note: we want to allow --version to be combined with --verbose. However
    # on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
    if check_cli_verbose(3) > 3:
        # Verbose mode: include Python and platform details.
        version_info = "pyftpsync/{} Python/{} {}".format(
            __version__, PYTHON_VERSION, platform.platform()
        )
    else:
        version_info = "{}".format(__version__)
    parser.add_argument("-V", "--version", action="version", version=version_info)

    subparsers = parser.add_subparsers(help="sub-command help")

    # --- Create the parser for the "upload" command -----
    sp = subparsers.add_parser(
        "upload",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files to remote folder",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite remote files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["local", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove remote files if they don't exist locally",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove remote files if they don't exist locally "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="upload")

    # --- Create the parser for the "download" command -----
    sp = subparsers.add_parser(
        "download",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files from remote folder to local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite local files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove local files if they don't exist on remote target",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove local files if they don't exist on remote target "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="download")

    # --- Create the parser for the "sync" command -----
    sp = subparsers.add_parser(
        "sync",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="synchronize new and modified files between remote folder and local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["old", "new", "local", "remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.set_defaults(command="sync")

    # --- Create the parser for the "run" command -----
    add_run_parser(subparsers)

    # --- Create the parser for the "scan" command -----
    add_scan_parser(subparsers)

    # --- Parse command line -----
    args = parser.parse_args()
    # Net verbosity = accumulated -v minus accumulated -q.
    args.verbose -= args.quiet
    del args.quiet
    # print("verbose", args.verbose)
    ftp_debug = 0
    if args.verbose >= 6:
        ftp_debug = 1

    # Modify the `args` from the `pyftpsync.yaml` config:
    if getattr(args, "command", None) == "run":
        handle_run_command(parser, args)

    if callable(getattr(args, "command", None)):
        # scan_handler: some sub-parsers register a callable instead of a
        # command name; invoke it directly.
        try:
            return args.command(parser, args)
        except KeyboardInterrupt:
            print("\nAborted by user.", file=sys.stderr)
            sys.exit(3)
    elif not hasattr(args, "command"):
        parser.error(
            "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
        )

    # Post-process and check arguments
    if hasattr(args, "delete_unmatched") and args.delete_unmatched:
        # --delete-unmatched implies --delete.
        args.delete = True

    args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})

    if args.remote == ".":
        parser.error("'.' is expected to be the local target (not remote)")
    args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
    if not isinstance(args.local_target, FsTarget) and isinstance(
        args.remote_target, FsTarget
    ):
        parser.error("a file system target is expected to be local")

    # Let the command handler do its thing
    opts = namespace_to_dict(args)
    if args.command == "upload":
        s = UploadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "download":
        s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "sync":
        s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
    else:
        parser.error("unknown command '{}'".format(args.command))

    s.is_script = True

    try:
        s.run()
    except KeyboardInterrupt:
        print("\nAborted by user.", file=sys.stderr)
        sys.exit(3)
    finally:
        # Prevent sporadic exceptions in ftplib, when closing in __del__
        s.local.close()
        s.remote.close()

    stats = s.get_stats()
    if args.verbose >= 5:
        pprint(stats)
    elif args.verbose >= 1:
        if args.dry_run:
            print("(DRY-RUN) ", end="")
        print(
            "Wrote {}/{} files in {} directories, skipped: {}.".format(
                stats["files_written"],
                stats["local_files"],
                stats["local_dirs"],
                stats["conflict_files_skipped"],
            ),
            end="",
        )
        if stats["interactive_ask"]:
            print()
        else:
            print(" Elap: {}.".format(stats["elap_str"]))
    return
def _revint(self, version):
    '''Internal function to convert a version string to an integer.

    Each dotted component occupies a two-decimal-digit slot, most
    significant first (e.g. "1.2.3" -> 10203).
    '''
    parts = version.split('.')
    total = 0
    for position, part in enumerate(parts):
        weight = 10 ** ((len(parts) - position - 1) * 2)
        total += int(part) * weight
    return total
def _prepare_commit_msg(tmp_file, author, files_modified=None, template=None):
    """Prepare the commit message in tmp_file.

    It will build the commit message prefilling the component line, as
    well as the signature using the git author and the modified files.

    The file remains untouched if it is not empty.
    """
    files_modified = files_modified or []
    template = template or "{component}:\n\nSigned-off-by: {author}\n{extra}"
    if hasattr(template, "decode"):
        # Py2 bytes template: normalize to text.
        template = template.decode()
    # NOTE(review): builtin open() takes buffering as its third positional
    # argument, so "utf-8" here only works if `open` is codecs.open (or
    # similar) at module level — TODO confirm the module's imports.
    with open(tmp_file, "r", "utf-8") as fh:
        contents = fh.readlines()
        # Keep the file untouched if it already has non-comment content.
        msg = filter(lambda x: not (x.startswith("#") or x.isspace()),
                     contents)
        if len(list(msg)):
            return

    component = "unknown"
    components = _get_components(files_modified)

    if len(components) == 1:
        component = components[0]
    elif len(components) > 1:
        # Multiple components touched: join them and warn in the message.
        component = "/".join(components)
        contents.append(
            "# WARNING: Multiple components detected - consider splitting "
            "commit.\r\n"
        )

    with open(tmp_file, "w", "utf-8") as fh:
        fh.write(template.format(component=component,
                                 author=author,
                                 extra="".join(contents)))
def key_file(self):
    """Write the auth key to a private temp file and return its path, or None."""
    if not self.auth_key:
        return None
    path = os.path.join(orchestration_mkdtemp(), 'key')
    with open(path, 'w') as handle:
        handle.write(self.auth_key)
    # Owner read-only: the key is a secret.
    os.chmod(path, stat.S_IRUSR)
    return path
def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
    """Add or update a key/value pair in the given .env file.

    If the .env path given doesn't exist, fails (with a warning) instead
    of risking creating an orphan .env somewhere in the filesystem.
    """
    cleaned = value_to_set.strip("'").strip('"')
    if not os.path.exists(dotenv_path):
        warnings.warn("can't write to %s - it doesn't exist." % dotenv_path)
        return None, key_to_set, cleaned
    # Values containing spaces are always quoted, regardless of quote_mode.
    if " " in cleaned:
        quote_mode = "always"
    if quote_mode == "always":
        new_line = '{}="{}"\n'.format(key_to_set, cleaned)
    else:
        new_line = '{}={}\n'.format(key_to_set, cleaned)
    with rewrite(dotenv_path) as (source, dest):
        found = False
        for mapping in parse_stream(source):
            if mapping.key == key_to_set:
                # Replace every existing assignment of this key.
                dest.write(new_line)
                found = True
            else:
                dest.write(mapping.original)
        if not found:
            # Key was absent: append it at the end of the file.
            dest.write(new_line)
    return True, key_to_set, cleaned
def wait_for_deps(self, conf, images):
    """Wait for all our dependencies.

    Polls each dependency image's wait condition until every dependency
    is ready, a condition times out, or a dependency container stops
    (in which case its logs are dumped and BadImage is raised).
    """
    from harpoon.option_spec.image_objs import WaitCondition
    api = conf.harpoon.docker_context_maker().api

    waited = set()
    last_attempt = {}
    dependencies = set(dep for dep, _ in conf.dependency_images())

    # Wait conditions come from dependency_options first
    # Or if none specified there, they come from the image itself
    wait_conditions = {}
    for dependency in dependencies:
        if conf.dependency_options is not NotSpecified and dependency in conf.dependency_options and conf.dependency_options[dependency].wait_condition is not NotSpecified:
            wait_conditions[dependency] = conf.dependency_options[dependency].wait_condition
        elif images[dependency].wait_condition is not NotSpecified:
            wait_conditions[dependency] = images[dependency].wait_condition

    if not wait_conditions:
        return

    start = time.time()
    while True:
        this_round = []
        for dependency in dependencies:
            if dependency in waited:
                continue

            image = images[dependency]
            if dependency in wait_conditions:
                done = self.wait_for_dep(api, image, wait_conditions[dependency], start, last_attempt.get(dependency))
                this_round.append(done)
                if done is True:
                    waited.add(dependency)
                elif done is False:
                    # Condition not met yet: remember when we last tried.
                    last_attempt[dependency] = time.time()
                elif done is WaitCondition.Timedout:
                    log.warning("Stopping dependency because it timedout waiting\tcontainer_id=%s", image.container_id)
                    self.stop_container(image)
            else:
                waited.add(dependency)

        # If anything reported other than "keep waiting", re-evaluate.
        if set(this_round) != set([WaitCondition.KeepWaiting]):
            if dependencies - waited == set():
                log.info("Finished waiting for dependencies")
                break
            else:
                log.info("Still waiting for dependencies\twaiting_on=%s", list(dependencies - waited))

            couldnt_wait = set()
            container_ids = {}
            for dependency in dependencies:
                if dependency in waited:
                    continue

                image = images[dependency]
                if image.container_id is None:
                    stopped = True
                    if dependency not in container_ids:
                        # NOTE(review): `available` is read here before it is
                        # ever assigned — this branch raises NameError at
                        # runtime. It presumably should be seeded from a
                        # docker container listing (e.g. api.containers());
                        # the correct fix is not derivable from this file.
                        available = sorted([i for i in available if "/{0}".format(image.container_name) in i["Names"]], key=lambda i: i["Created"])
                        if available:
                            container_ids[dependency] = available[0]["Id"]
                else:
                    if dependency not in container_ids:
                        container_ids[dependency] = image.container_id
                    stopped, _ = self.is_stopped(image, image.container_id)

                if stopped:
                    couldnt_wait.add(dependency)

            if couldnt_wait:
                # Dump logs for every dependency that died, then abort.
                for container in couldnt_wait:
                    if container not in images or container not in container_ids:
                        continue
                    image = images[container]
                    container_id = container_ids[container]
                    container_name = image.container_name
                    hp.write_to(conf.harpoon.stdout, "=================== Logs for failed container {0} ({1})\n".format(container_id, container_name))
                    for line in conf.harpoon.docker_api.logs(container_id).split("\n"):
                        hp.write_to(conf.harpoon.stdout, "{0}\n".format(line))
                    hp.write_to(conf.harpoon.stdout, "------------------- End logs for failed container\n")
                raise BadImage("One or more of the dependencies stopped running whilst waiting for other dependencies", stopped=list(couldnt_wait))

        time.sleep(0.1)
def learnSequences(self, sequences):
    """Learns all provided sequences, resetting the network in between.

    Sequences format::

        sequences = [
            set([16, 22, 32]),  # S0, position 0
            set([13, 15, 33]),  # S0, position 1
            set([6, 12, 52]),   # S1, position 0
            set([6, 2, 15]),    # S1, position 1
        ]

    The order of each sequence is important: it denotes the sequence
    number and will be used during inference to plot accuracy.

    The method goes through four phases:
      1) train L4 on the sequences, over multiple shuffled passes
      2) train L2 in one pass
      3) continue training L4 so the apical segments learn
      4) run inference and store the L2 representation of each sequence

    :param sequences: (list) Sequences to learn, in the canonical format
        specified above.
    """
    sequence_order = range(len(sequences))
    if self.config["L2Params"]["onlineLearning"]:
        # Online learning: L2 and L4 both learn at all times.
        self._setLearningMode(l4Learning=True, l2Learning=True)
        for _ in xrange(self.numLearningPoints):
            random.shuffle(sequence_order)
            for i in sequence_order:
                sequence = sequences[i]
                for s in sequence:
                    self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                    self.network.run(1)
                # This is equivalent to, and faster than, giving the
                # network no input for a period of time.
                self.sendReset()
    else:
        # Phase 1: train L4 sequence memory only.
        self._setLearningMode(l4Learning=True, l2Learning=False)
        for _ in xrange(self.numLearningPoints):
            random.shuffle(sequence_order)
            for i in sequence_order:
                sequence = sequences[i]
                for s in sequence:
                    self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                    self.network.run(1)
                # This is equivalent to, and faster than, giving the
                # network no input for a period of time.
                self.sendReset()
        # Phase 2: train L2 in a single pass.
        self._setLearningMode(l4Learning=False, l2Learning=True)
        for i in sequence_order:
            sequence = sequences[i]
            for s in sequence:
                self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                self.network.run(1)
            self.sendReset()
        # Phase 3: train L4 apical segments.
        self._setLearningMode(l4Learning=True, l2Learning=False)
        for _ in xrange(5):
            for i in sequence_order:
                sequence = sequences[i]
                for s in sequence:
                    self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
                    self.network.run(1)
                self.sendReset()
    # Phase 4: inference pass -- store the L2 representation reached at
    # the end of each sequence, keyed by the sequence number.
    self._setLearningMode(l4Learning=False, l2Learning=False)
    self.sendReset()
    for sequenceNum, sequence in enumerate(sequences):
        for s in sequence:
            self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
            self.network.run(1)
        self.objectL2Representations[sequenceNum] = self.getL2Representations()
        self.sendReset()
    return
def set_action_cache(self, action_key, data):
    """Store action needs and excludes.

    .. note:: The action is saved only if a cache system is defined.

    :param action_key: The unique action name.
    :param data: The action to be saved.
    """
    if not self.cache:
        return
    cache_key = self.app.config['ACCESS_ACTION_CACHE_PREFIX'] + action_key
    self.cache.set(cache_key, data)
def events(self):
    """Lazily build and cache the event list accessor.

    :rtype: twilio.rest.monitor.v1.event.EventList
    """
    cached = self._events
    if cached is None:
        cached = EventList(self)
        self._events = cached
    return cached
def fetchMyCgi(self):
    """Fetches statistics from my_cgi.cgi.

    Returns a dict mapping each reported field name to its value, or
    None (with self._error_report set) when the device is unreachable.
    """
    url = 'http://{}/my_cgi.cgi'.format(self.ip)
    try:
        response = urlopen(Request(url, b'request=create_chklst'))
    except (HTTPError, URLError):
        _LOGGER.warning("Failed to open url to {}".format(self.ip))
        self._error_report = True
        return None
    stats = {}
    for raw_line in response.readlines():
        fields = raw_line.decode().split(':')
        stats[fields[0].strip()] = fields[1].strip()
    return stats
def array_scanlines_interlace ( self , pixels ) : """Generator for interlaced scanlines from an array . ` pixels ` is the full source image as a single array of values . The generator yields each scanline of the reduced passes in turn , each scanline being a sequence of values ."""
# http : / / www . w3 . org / TR / PNG / # 8InterlaceMethods # Array type . fmt = 'BH' [ self . bitdepth > 8 ] # Value per row vpr = self . width * self . planes # Each iteration generates a scanline starting at ( x , y ) # and consisting of every xstep pixels . for lines in adam7_generate ( self . width , self . height ) : for x , y , xstep in lines : # Pixels per row ( of reduced image ) ppr = int ( math . ceil ( ( self . width - x ) / float ( xstep ) ) ) # Values per row ( of reduced image ) reduced_row_len = ppr * self . planes if xstep == 1 : # Easy case : line is a simple slice . offset = y * vpr yield pixels [ offset : offset + vpr ] continue # We have to step by xstep , # which we can do one plane at a time # using the step in Python slices . row = array ( fmt ) # There ' s no easier way to set the length of an array row . extend ( pixels [ 0 : reduced_row_len ] ) offset = y * vpr + x * self . planes end_offset = ( y + 1 ) * vpr skip = self . planes * xstep for i in range ( self . planes ) : row [ i : : self . planes ] = pixels [ offset + i : end_offset : skip ] yield row
def on_message(self, client, userdata, msg):
    '''Callback for when a ``PUBLISH`` message is received from the broker.

    :param client: paho-mqtt client instance (unused here).
    :param userdata: user data set on the client (unused here).
    :param msg: received MQTT message; ``msg.topic`` selects the action.
    '''
    # Global (non port-specific) request.
    if msg.topic == 'serial_device/refresh_comports':
        self.refresh_comports()
        return
    match = CRE_MANAGER.match(msg.topic)
    if match is None:
        logger.debug('Topic NOT matched: `%s`', msg.topic)
    else:
        logger.debug('Topic matched: `%s`', msg.topic)
        # Message topic matches command.  Handle request.
        command = match.group('command')
        port = match.group('port')
        if command == 'send':
            # serial_device/<port>/send -- payload is raw bytes to send.
            self._serial_send(port, msg.payload)
        elif command == 'connect':
            # serial_device/<port>/connect -- payload is a JSON request.
            try:
                request = json.loads(msg.payload)
            except ValueError as exception:
                logger.error('Error decoding "%s (%s)" request: %s',
                             command, port, exception)
                return
            self._serial_connect(port, request)
        elif command == 'close':
            # serial_device/<port>/close -- request connection close.
            self._serial_close(port)
def wait_for_mouse_move_from ( self , origin_x , origin_y ) : """Wait for the mouse to move from a location . This function will block until the condition has been satisified . : param origin _ x : the X position you expect the mouse to move from : param origin _ y : the Y position you expect the mouse to move from"""
_libxdo . xdo_wait_for_mouse_move_from ( self . _xdo , origin_x , origin_y )
def _decode_telegram_base64(string):
    """Decode an url-safe base64 string, restoring stripped padding first.

    Telegram shares binary data (Bot API-style file IDs, invite links)
    as unpadded url-safe base64; this adds the '=' padding back before
    decoding.

    Returns ``None`` if the input string was not valid.
    """
    try:
        # A base64 string's length must be a multiple of 4; (-len) % 4 is
        # exactly the number of '=' characters that were stripped.
        # (The previous len(string) % 4 appended three '=' when
        # len % 4 == 3 and only decoded thanks to CPython's lenient
        # padding handling.)
        return base64.urlsafe_b64decode(string + '=' * (-len(string) % 4))
    except (binascii.Error, ValueError, TypeError):
        # Wrong padding, invalid characters, or a non-string input.
        return None
def filter(self, userinfo, user_info_claims=None):
    """Return only those claims that are asked for.

    It's a best-effort task; if essential claims are not present no
    error is flagged -- they are simply left out of the result.

    :param userinfo: A dictionary containing the available info for one user
    :param user_info_claims: A dictionary specifying the asked-for claims
    :return: A dictionary of filtered claims.
    """
    if user_info_claims is None:
        # No restriction: hand back a shallow copy of everything.
        return copy.copy(userinfo)
    # The original also accumulated which absent claims were "essential"
    # vs optional, but never used those lists; only present claims matter.
    result = {}
    for key in user_info_claims:
        try:
            result[key] = userinfo[key]
        except KeyError:
            pass  # best effort: missing claims are silently skipped
    return result
def add_papyrus_routes(self, route_name_prefix, base_url):
    """A helper method that adds routes to view callables that, together,
    implement the MapFish HTTP interface.

    Example::

        import papyrus
        config.include(papyrus)
        config.add_papyrus_routes('spots', '/spots')
        config.scan()

    Arguments:

    ``route_name_prefix``
        The prefix used for the route names passed to ``config.add_route``.

    ``base_url``
        The web service's base URL, e.g. ``/spots``. No trailing slash!
    """
    # (route-name suffix, url tail, HTTP method) per MapFish endpoint.
    route_specs = [
        ('_read_many', '', 'GET'),
        ('_read_one', '/{id}', 'GET'),
        ('_count', '/count', 'GET'),
        ('_create', '', 'POST'),
        ('_update', '/{id}', 'PUT'),
        ('_delete', '/{id}', 'DELETE'),
    ]
    for suffix, tail, http_method in route_specs:
        self.add_route(route_name_prefix + suffix, base_url + tail,
                       request_method=http_method)
def set_meta(self, meta_data):
    '''node.set_meta(meta) yields a calculation node identical to the given
    node except that its meta_data attribute has been set to the given
    dictionary meta. If meta is not persistent, it is cast to a
    persistent dictionary first.
    '''
    if not isinstance(meta_data, (ps.PMap, IMap)):
        # Cast plain mappings to a persistent map.
        meta_data = ps.pmap(meta_data)
    dup = copy.copy(self)
    # The node is immutable, so bypass the overridden __setattr__.
    object.__setattr__(dup, 'meta_data', meta_data)
    return dup
def get_semantic_data(self, path_as_list):
    """Retrieves an entry of the semantic data.

    :param list path_as_list: The path in the vividict to retrieve the value from
    :return: the value stored under the given path
    :raises KeyError: if any element of the path is missing
    """
    node = self.semantic_data
    for element in path_as_list:
        if element not in node:
            raise KeyError("The state with name {1} and id {2} holds no semantic data with path {0}."
                           "".format(path_as_list[:path_as_list.index(element) + 1],
                                     self.name, self.state_id))
        node = node[element]
    return node
def create_named_notebook(fname, context):
    """Create a named notebook if one doesn't exist.

    :param fname: path of the notebook file to create
    :param context: dict; an optional "greeting" entry becomes an extra
        markdown cell at the top of the notebook
    """
    if os.path.exists(fname):
        return
    from nbformat import v4 as nbf
    # Courtesy of http://nbviewer.ipython.org/gist/fperez/9716279
    text = "Welcome to *pyramid_notebook!* Use *File* *>* *Shutdown* to close this."
    cells = [nbf.new_markdown_cell(text)]
    greeting = context.get("greeting")
    if greeting:
        cells.append(nbf.new_markdown_cell(greeting))
    # Start the user off with one empty code cell.
    cells.append(nbf.new_code_cell(''))
    nb = nbf.new_notebook(cells=cells)
    # NOTE(review): JSONWriter is assumed to come from a module-level
    # import (likely nbformat's JSON writer) -- confirm at file top.
    with open(fname, 'w') as f:
        writer = JSONWriter()
        writer.write(nb, f)
def recordParser(paper):
    """Read a WOS record from *paper* until 'ER', returning an OrderedDict
    mapping each two-letter field tag to the list of its lines.

    For example the two lines::

        AF BREVIK, I
           ANICIN, B

    produce the entry ``{'AF': ["BREVIK, I", "ANICIN, B"]}``.
    ``Record`` objects can be created with these dictionaries as the
    initializer.

    # Parameters

    _paper_ : `file stream`
        An open file, with the current line at the beginning of the WOS
        record.  Each item is a ``(line number, line text)`` pair.

    # Returns

    `OrderedDict[str : List[str]]`
        A dictionary mapping WOS tags to lists of line strings.
    """
    tagList = []
    doneReading = False
    l = (0, '')
    for l in paper:
        if len(l[1]) < 3:
            # Line too short
            raise BadWOSRecord("Missing field on line {} : {}".format(l[0], l[1]))
        elif 'ER' in l[1][:2]:
            # Reached the end of the record
            doneReading = True
            break
        elif l[1][2] != ' ':
            # Field tag longer than 2 or offset in some way
            raise BadWOSFile("Field tag not formed correctly on line " + str(l[0]) + " : " + l[1])
        elif '   ' in l[1][:3]:
            # The line starts with three spaces: continuation of the
            # current tag (last tag in tagList).
            # BUGFIX: restored the three-space literal; it had been
            # collapsed to a single space, which made every valid tag
            # line (third char is forced to be ' ' above) look like a
            # continuation and crashed on the first tag of a record.
            tagList[-1][1].append(l[1][3:-1])
        else:
            # New tag: create a new entry at the end of tagList.
            tagList.append((l[1][:2], [l[1][3:-1]]))
    if not doneReading:
        raise BadWOSRecord("End of file reached before ER: {}".format(l[1]))
    else:
        retdict = collections.OrderedDict(tagList)
        if len(retdict) == len(tagList):
            return retdict
        else:
            # Duplicate tags collapse in the OrderedDict; report them.
            dupSet = set()
            for tupl in tagList:
                if tupl[0] in retdict:
                    dupSet.add(tupl[0])
            raise BadWOSRecord("Duplicate tags (" + ', '.join(dupSet) + ") in record")
def updateData ( self , signal , fs ) : """Displays a spectrogram of the provided signal : param signal : 1 - D signal of audio : type signal : numpy . ndarray : param fs : samplerate of signal : type fs : int"""
# use a separate thread to calculate spectrogram so UI doesn ' t lag t = threading . Thread ( target = _doSpectrogram , args = ( self . spec_done , ( fs , signal ) , ) , kwargs = self . specgramArgs ) t . start ( )
def _patched_method(self, method, *args, **kwargs):
    """Step 4 (1st flow). Call method, validating invariants both before
    and after the call."""
    self._validate()
    outcome = method(*args, **kwargs)
    self._validate()
    return outcome
def returnDepositsWithdrawals(self, start=0, end=2 ** 32 - 1):
    """Returns your deposit and withdrawal history within a range,
    specified by the "start" and "end" POST parameters, both of which
    should be given as UNIX timestamps.
    """
    window = {'start': start, 'end': end}
    return self._private('returnDepositsWithdrawals', **window)
def vnic_add_new_to_vm_task(vm, network, logger):
    """Compose a new vNIC, attach it to the VM and connect it to Network.

    :param vm: <vim.vm.VM> virtual machine to receive the vNIC
    :param network: <vim network obj> network to connect the new vNIC to
    :param logger: logger instance
    :return: <Task>
    """
    nic_spec = VNicService.vnic_new_attached_to_network(network)
    return VNicService.vnic_add_to_vm_task(nic_spec, vm, logger)
def make_pass_decorator(object_type, ensure=False):
    """Given an object type this creates a decorator that will work
    similar to :func:`pass_obj` but instead of passing the object of the
    current context, it will find the innermost context of type
    :func:`object_type`.

    This generates a decorator that works roughly like this::

        from functools import update_wrapper

        def decorator(f):
            @pass_context
            def new_func(ctx, *args, **kwargs):
                obj = ctx.find_object(object_type)
                return ctx.invoke(f, obj, *args, **kwargs)
            return update_wrapper(new_func, f)
        return decorator

    :param object_type: the type of the object to pass.
    :param ensure: if set to `True`, a new object will be created and
                   remembered on the context if it's not there yet.
    """
    def decorator(f):
        def new_func(*args, **kwargs):
            ctx = get_current_context()
            if ensure:
                # Create and remember the object on the context if missing.
                obj = ctx.ensure_object(object_type)
            else:
                obj = ctx.find_object(object_type)
            if obj is None:
                raise RuntimeError('Managed to invoke callback without a '
                                   'context object of type %r existing'
                                   % object_type.__name__)
            # invoke() keeps the context stack intact for the callback.
            return ctx.invoke(f, obj, *args, **kwargs)
        return update_wrapper(new_func, f)
    return decorator
def add_to_space_size(self, addition_bytes):
    # type: (int) -> None
    '''A method to add bytes to the space size tracked by this Volume
    Descriptor.

    Parameters:
     addition_bytes - The number of bytes to add to the space size.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Volume Descriptor is not yet initialized')
    # The 'addition' parameter is expected to be in bytes, but the space
    # size we track is in extents.  Round up to the next extent.
    self.space_size += utils.ceiling_div(addition_bytes, self.log_block_size)
def start_module(self):
    """Wrapper for the _main function.

    Catch any exception occurring in the main function, log it, and
    re-raise it wrapped in a generic Exception.  The original exception
    is chained (``from exp``) so its type and traceback are preserved
    as ``__cause__`` instead of being discarded.

    :return: None
    """
    try:
        self._main()
    except Exception as exp:
        logger.exception('%s', traceback.format_exc())
        raise Exception(exp) from exp
def adjust_logging(self, context):
    """Adjust logging configuration.

    :param context: The guacamole context object.

    Applies the results of early argument parsing: the value passed to
    ``--log-level`` configures the root logger, and each logger named
    via ``--trace`` is switched to DEBUG.
    """
    early = context.early_args
    if early.log_level:
        logging.getLogger("").setLevel(early.log_level)
    for traced_name in early.trace:
        logging.getLogger(traced_name).setLevel(logging.DEBUG)
        _logger.info("Enabled tracing on logger %r", traced_name)
def execute_command(self):
    """Execute the shell command once for every role under self.roles_path.

    ``%role_name`` in the command template is substituted with each
    role's name; each command runs from within that role's directory.
    Errors are reported through the UI, otherwise a success summary is
    printed unless quiet mode is configured.
    """
    stderr = ""
    role_count = 0
    for role in utils.roles_dict(self.roles_path):
        # Substitute the current role name into the command template.
        self.command = self.command.replace("%role_name", role)
        (_, err) = utils.capture_shell(
            "cd {0} && {1}".format(os.path.join(self.roles_path, role), self.command))
        # NOTE(review): only the stderr of the most recent role survives
        # the loop -- earlier errors are overwritten; confirm intended.
        stderr = err
        role_count += 1
    utils.exit_if_no_roles(role_count, self.roles_path)
    if len(stderr) > 0:
        # stderr[:-1] drops the trailing newline from the captured output.
        ui.error(c.MESSAGES["run_error"], stderr[:-1])
    else:
        if not self.config["options_quiet"]:
            ui.ok(c.MESSAGES["run_success"].replace("%role_count", str(role_count)),
                  self.options.command)
def fit(self, X, y=None, **kwargs):
    """The fit method is the primary drawing input for the dispersion
    visualization.

    Parameters
    ----------
    X : list or generator
        Should be provided as a list of documents or a generator that
        yields a list of documents that contain a list of words in the
        order they appear in the document.

    y : ndarray or Series of length n
        An optional array or series of target or class values for
        instances. If this is specified, then the points will be colored
        according to their class.

    kwargs : dict
        Pass generic arguments to the drawing method

    Returns
    -------
    self : instance
        Returns the instance of the transformer/visualizer
    """
    # Determine the class labels used to color the points.
    if y is not None:
        self.classes_ = np.unique(y)
    elif y is None and self.labels is not None:
        self.classes_ = np.array([self.labels[0]])
    else:
        self.classes_ = np.array([self.NULL_CLASS])
    # Create an index (e.g. the y position) for the target words;
    # flipped so the first target word ends up at the top of the plot.
    self.indexed_words_ = np.flip(self.target_words, axis=0)
    if self.ignore_case:
        self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])
    # Stack is used to create a 2D array from the generator; an empty
    # generator (no target word found at all) raises ValueError.
    try:
        points_target = np.stack(self._compute_dispersion(X, y))
    except ValueError:
        raise YellowbrickValueError((
            "No indexed words were found in the corpus"
        ))
    # Columns 0/1 are (word offset, word index) coordinates; column 2
    # carries the per-occurrence target value.
    points = np.stack(zip(points_target[:, 0].astype(int),
                          points_target[:, 1].astype(int)))
    self.target = points_target[:, 2]
    self._check_missing_words(points)
    self.draw(points, self.target)
    return self
def get_resolution(self) -> list:
    '''Show device resolution.

    Runs ``adb -s <serial> shell wm size`` and returns the
    ``[width, height]`` strings parsed from its output.
    '''
    raw, _ = self._execute('-s', self.device_sn, 'shell', 'wm', 'size')
    size_token = raw.split()[2]
    return size_token.split('x')
def save_optimizer_for_phase(phase):
    """Save the optimizer associated with the phase as a pickle."""
    with open(make_optimizer_pickle_path(phase), "w+b") as stream:
        pickle.dump(phase.optimizer, stream)
def _get_exceptions_db(self):
    """Return a list of dictionaries suitable to be used with ptrie module.

    Each entry has a "name" (callable name or calling path) and a "data"
    string of the form ``ExType (message)*`` where the trailing ``*``
    marks exceptions that were actually raised.
    """
    template = "{extype} ({exmsg}){raised}"
    if not self._full_cname:
        # When full callable name is not used the calling path is
        # irrelevant and there is no function associated with an
        # exception
        ret = []
        for _, fdict in self._ex_dict.items():
            for key in fdict.keys():
                ret.append({
                    "name": fdict[key]["name"],
                    "data": template.format(
                        extype=_ex_type_str(key[0]),
                        exmsg=key[1],
                        raised="*" if fdict[key]["raised"][0] else "",
                    ),
                })
        return ret
    # When full callable name is used, all calling paths are saved
    ret = []
    for fdict in self._ex_dict.values():
        for key in fdict.keys():
            for func_name in fdict[key]["function"]:
                # NOTE(review): .index() returns the *first* occurrence,
                # so duplicated function names in this list would all
                # report the first entry's "raised" flag -- confirm
                # duplicates cannot occur here.
                rindex = fdict[key]["function"].index(func_name)
                raised = fdict[key]["raised"][rindex]
                ret.append({
                    "name": self.decode_call(func_name),
                    "data": template.format(
                        extype=_ex_type_str(key[0]),
                        exmsg=key[1],
                        raised="*" if raised else "",
                    ),
                })
    return ret
def initializenb():
    """Find input files and log initialization info.

    Returns a ``(candsfile, noisefile, fileroot)`` tuple.  The fileroot
    is taken from the ``fileroot`` environment variable when set,
    otherwise it is derived from the first ``cands_*_merge.pkl`` file
    found in the current directory.
    """
    logger.info('Working directory: {0}'.format(os.getcwd()))
    logger.info('Run on {0}'.format(asctime()))
    try:
        fileroot = os.environ['fileroot']
        logger.info('Setting fileroot to {0} from environment variable.\n'.format(fileroot))
        candsfile = 'cands_{0}_merge.pkl'.format(fileroot)
        noisefile = 'noise_{0}_merge.pkl'.format(fileroot)
    except KeyError:
        sdmdir = os.getcwd()
        logger.info('Setting sdmdir to current directory {0}\n'.format(os.path.abspath(sdmdir)))
        candsfiles = glob.glob('cands_*_merge.pkl')
        noisefiles = glob.glob('noise_*_merge.pkl')
        if len(candsfiles) == 1 and len(noisefiles) == 1:
            logger.info('Found one cands/merge file set')
        else:
            # NOTE(review): an empty directory still falls through to the
            # [0] indexing below and raises IndexError -- confirm intended.
            logger.warn('Found multiple cands/noise file sets. Taking first.')
        candsfile = candsfiles[0]
        noisefile = noisefiles[0]
        # BUGFIX: str.lstrip/rstrip strip *character sets*, not prefixes
        # or suffixes, so a fileroot beginning or ending with any of those
        # characters (e.g. 'c', 's', 'e', 'l', ...) was silently
        # truncated.  Slice off the known prefix/suffix instead.
        fileroot = candsfile[len('cands_'):-len('_merge.pkl')]
    logger.info('Set: \n\t candsfile {} \n\t noisefile {} \n\t fileroot {} '
                .format(candsfile, noisefile, fileroot))
    return (candsfile, noisefile, fileroot)
def get_default_config(self):
    """Return the default config for the handler."""
    config = super(StatsdHandler, self).get_default_config()
    overrides = {
        'host': '',
        'port': 1234,
        'batch': 1,
    }
    config.update(overrides)
    return config
def main(args=sys.argv[1:]):
    """Run the commandline script.

    Parses the options, builds a Trovebox client and performs a single
    GET or POST against the chosen endpoint, printing the result.
    """
    usage = "%prog --help"
    parser = OptionParser(usage, add_help_option=False)
    parser.add_option('-c', '--config',
                      help="Configuration file to use",
                      action='store', type='string', dest='config_file')
    parser.add_option('-h', '-H', '--host',
                      help=("Hostname of the Trovebox server "
                            "(overrides config_file)"),
                      action='store', type='string', dest='host')
    parser.add_option('-X',
                      help="Method to use (GET or POST)",
                      action='store', type='choice', dest='method',
                      choices=('GET', 'POST'), default="GET")
    parser.add_option('-F',
                      help="Endpoint field",
                      action='append', type='string', dest='fields')
    parser.add_option('-e',
                      help="Endpoint to call",
                      action='store', type='string', dest='endpoint',
                      default='/photos/list.json')
    parser.add_option('-p',
                      help="Pretty print the json",
                      action="store_true", dest="pretty", default=False)
    parser.add_option('-v',
                      help="Verbose output",
                      action="store_true", dest="verbose", default=False)
    parser.add_option('--version',
                      help="Display the current version",
                      action="store_true")
    parser.add_option('--help',
                      help='show this help message',
                      action="store_true")
    options, args = parser.parse_args(args)

    if options.help:
        parser.print_help()
        return
    if options.version:
        print(trovebox.__version__)
        return
    if args:
        parser.error("Unknown argument: %s" % args)

    # Collect "-F key=value" endpoint fields.
    params = {}
    if options.fields:
        for field in options.fields:
            (key, value) = field.split('=')
            params[key] = value

    # Host option overrides config file settings
    if options.host:
        client = trovebox.Trovebox(host=options.host)
    else:
        try:
            client = trovebox.Trovebox(config_file=options.config_file)
        except IOError as error:
            # BUGFIX: the error used to be printed twice (before and
            # after CONFIG_ERROR); print it once, after the hint.
            print(CONFIG_ERROR)
            print(error)
            sys.exit(1)

    if options.method == "GET":
        result = client.get(options.endpoint, process_response=False, **params)
    else:
        # POST: file-valued fields are uploaded and must be closed after.
        params, files = extract_files(params)
        result = client.post(options.endpoint, process_response=False,
                             files=files, **params)
        for file_ in files:
            files[file_].close()

    if options.verbose:
        print("==========\nMethod: %s\nHost: %s\nEndpoint: %s"
              % (options.method, client.host, options.endpoint))
        if params:
            print("Fields:")
            for key, value in params.items():
                print(" %s=%s" % (key, value))
        print("==========\n")

    if options.pretty:
        print(json.dumps(json.loads(result), sort_keys=True,
                         indent=4, separators=(',', ':')))
    else:
        print(result)
def find_module(self, name):
    """Find the Module by its name.

    :raises LookupError: if no module with the given name is defined.
    """
    handle = lib.EnvFindDefmodule(self._env, name.encode())
    if handle == ffi.NULL:
        raise LookupError("Module '%s' not found" % name)
    return Module(self._env, handle)
def register(self, notification_cls=None):
    """Registers a Notification class unique by name.

    Registration is refused (AlreadyRegistered) when either the class
    name or its display name is already present in the registry.
    """
    # Mark the registry as loaded even if the registration fails below.
    self.loaded = True
    display_names = [n.display_name for n in self.registry.values()]
    if (notification_cls.name not in self.registry
            and notification_cls.display_name not in display_names):
        self.registry.update({notification_cls.name: notification_cls})
        # A notification may declare several models ("models") or a
        # single one ("model"); normalize to a list.
        models = getattr(notification_cls, "models", [])
        if not models and getattr(notification_cls, "model", None):
            models = [getattr(notification_cls, "model")]
        for model in models:
            try:
                # Avoid duplicate entries for the same model.
                if notification_cls.name not in [n.name for n in self.models[model]]:
                    self.models[model].append(notification_cls)
            except KeyError:
                # First notification registered for this model.
                self.models.update({model: [notification_cls]})
    else:
        raise AlreadyRegistered(
            f"Notification {notification_cls.name}: "
            f"{notification_cls.display_name} is already registered.")
def ToCategorizedPath(path_type, components):
    """Translates a path type and a list of components to a categorized path."""
    # Category prefix for each supported path type.
    categories = {
        PathInfo.PathType.OS: ("fs", "os"),
        PathInfo.PathType.TSK: ("fs", "tsk"),
        PathInfo.PathType.REGISTRY: ("registry",),
        PathInfo.PathType.TEMP: ("temp",),
    }
    try:
        prefix = categories[path_type]
    except KeyError:
        raise ValueError("Unknown path type: `%s`" % path_type)
    return "/".join(itertools.chain(prefix, components))
def parse_doc(doc):
    """Extract a list of sentences containing (text, label) pairs.

    The document is split on blank lines; each chunk is converted by
    ``get_texts_and_labels`` into parallel text/label sequences which
    are zipped into (text, label) pairs.
    """
    # The original also initialized word_spans / sentence_spans lists
    # that were never used; they have been removed.
    sentences = []
    for chunk in doc.split('\n\n'):
        sent_texts, sent_labels = get_texts_and_labels(chunk.strip())
        sentences.append(list(zip(sent_texts, sent_labels)))
    return sentences
def networks(self, names=None, ids=None, filters=None):
    """List networks. Similar to the ``docker networks ls`` command.

    Args:
        names (:py:class:`list`): List of names to filter by
        ids (:py:class:`list`): List of ids to filter by
        filters (dict): Filters to be processed on the network list.
            Available filters:
            - ``driver=[<driver-name>]`` Matches a network's driver.
            - ``label=[<key>]`` or ``label=[<key>=<value>]``.
            - ``type=["custom"|"builtin"]`` Filters networks by type.

    Returns:
        (dict): List of network objects.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Fold the convenience name/id arguments into the filters dict
    # (mutated in place, matching the historical behaviour).
    if filters is None:
        filters = {}
    if names:
        filters['name'] = names
    if ids:
        filters['id'] = ids
    query = {'filters': utils.convert_filters(filters)}
    response = self._get(self._url("/networks"), params=query)
    return self._result(response, json=True)
def parse(self, s, term_join=None):
    """Parses a search term into a dict of marker -> term.

    Args:
        s (str): string with search term.
        term_join (callable): function to join 'OR' terms.

    Returns:
        dict: all of the terms grouped by marker. Key is a marker,
            value is a term.

    Example:
        >>> SearchTermParser().parse('table2 from 1978 to 1979 in california')
        {'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}
    """
    if not term_join:
        term_join = lambda x: '(' + ' OR '.join(x) + ')'
    toks = self.scan(s)
    # Example: "diabetes from 2014 to 2016 source healthindicators.gov"
    # Assume the first term is ABOUT, if it is not marked with a marker.
    if toks and toks[0] and (toks[0][0] == self.TERM or toks[0][0] == self.QUOTEDTERM):
        toks = [(self.MARKER, 'about')] + toks
    # Group the terms by their marker: each MARKER token opens a new
    # (marker, [terms]) bucket; every other token joins the last bucket.
    # e.g. [('about', [(0, 'diabetes')]), ('from', [(4, 2014)]), ...]
    bymarker = []
    for t in toks:
        if t[0] == self.MARKER:
            bymarker.append((t[1], []))
        else:
            bymarker[-1][1].append(t)
    # Convert some of the markers based on their contents.  This just
    # changes the marker type for keywords; more adjustments come later.
    comps = []
    for t in bymarker:
        t = list(t)
        # 'in <geographic grain>' is really a 'by' (grain) term.
        if t[0] == 'in' and len(t[1]) == 1 and isinstance(t[1][0][1], string_types) and self.stem(t[1][0][1]) in self.geograins.keys():
            t[0] = 'by'
        # If the 'from' term isn't a year, then it is really a source.
        if t[0] == 'from' and len(t[1]) == 1 and t[1][0][0] != self.YEAR:
            t[0] = 'source'
        comps.append(t)
    # Join all of the terms into single marker groups.
    groups = {marker: [] for marker, _ in comps}
    for marker, terms in comps:
        groups[marker] += [term for marker, term in terms]
    # Only a few of the markers should have more than one term, so move
    # extras to the 'about' group.
    # NOTE(review): this loop can insert the 'about' key while iterating
    # groups.items() -- fine on Python 2, but a RuntimeError on Python 3;
    # confirm which interpreter this module targets.
    for marker, group in groups.items():
        if marker == 'about':
            continue
        if len(group) > 1 and marker not in self.multiterms:
            groups[marker], extras = [group[0]], group[1:]
            if not 'about' in groups:
                groups['about'] = extras
            else:
                groups['about'] += extras
        # Translate geographic grains through the lookup table.
        if marker == 'by':
            groups['by'] = [self.geograins.get(self.stem(e)) for e in group]
    # Collapse each group: single term -> scalar, several -> joined.
    for marker, terms in iteritems(groups):
        if len(terms) > 1:
            # NOTE(review): `marker in 'in'` is a substring test ('i' and
            # 'n' alone also match) -- presumably `marker == 'in'` was
            # meant; kept as-is, confirm before changing.
            if marker in 'in':
                groups[marker] = ' '.join(terms)
            else:
                groups[marker] = term_join(terms)
        elif len(terms) == 1:
            groups[marker] = terms[0]
        else:
            pass
    # Final shape, e.g.:
    # {'to': 2016, 'about': 'diabetes', 'from': 2014,
    #  'source': 'healthindicators.gov'}
    return groups
def handle_padding(self, padding):
    '''Pads the image with transparent pixels if necessary.

    ``padding`` is a (left, top, right, bottom) tuple; the original
    image is pasted at the offset created by the left/top padding.
    '''
    left, top, right, bottom = padding
    # Non-positive values add no padding, matching the original guards.
    offset_x = max(left, 0)
    offset_y = max(top, 0)
    new_width = self.engine.size[0] + offset_x + max(right, 0)
    new_height = self.engine.size[1] + offset_y + max(bottom, 0)
    padded = self.context.modules.engine.__class__(self.context)
    padded.image = padded.gen_image((new_width, new_height), '#fff')
    padded.enable_alpha()
    padded.paste(self.engine, (offset_x, offset_y))
    self.engine.image = padded.image
def query(self, sql, timeout=10):
    """Submit a SQL query to Drill and return its results.

    :param sql: string, the SQL statement to execute
    :param timeout: int, request timeout in seconds
    :return: pydrill.client.ResultQuery
    :raises QueryError: if ``sql`` is empty
    """
    if not sql:
        raise QueryError('No query passed to drill.')
    request_args = {
        'method': 'POST',
        'url': '/query.json',
        'body': {"queryType": "SQL", "query": sql},
        'params': {'request_timeout': timeout},
    }
    return ResultQuery(*self.perform_request(**request_args))
def setup_logger(log_dir=None, loglevel=logging.DEBUG):
    """Instantiate the 'ding0' logger with file and console handlers.

    Parameters
    ----------
    log_dir : str
        Directory to save log, default: ~/.ding0/log/
    loglevel :
        Level of the logger itself (handlers keep their own levels:
        DEBUG for the file, INFO for the console).
    """
    # Make sure the default log directory exists before any handler opens it.
    create_home_dir()
    create_dir(os.path.join(get_default_home_dir(), 'log'))
    if log_dir is None:
        log_dir = os.path.join(get_default_home_dir(), 'log')

    logger = logging.getLogger('ding0')  # use filename as name in log
    logger.setLevel(loglevel)

    # File handler: verbose output including function names.
    file_handler = logging.FileHandler(os.path.join(log_dir, 'ding0.log'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s-%(funcName)s-%(message)s (%(levelname)s)'))

    # Stream handler: terse output printed to the prompt.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s (%(levelname)s)'))

    for handler in (file_handler, console):
        logger.addHandler(handler)

    logger.info('########## New run of Ding0 issued #############')
    return logger
def get_all(self, uids: Iterable[int]) -> Mapping[int, Record]:
    """Return the subset of stored records whose UID appears in *uids*.

    UIDs with no stored record are silently skipped.

    Args:
        uids: The message UIDs to look up.
    """
    found = {}
    for uid in uids:
        if uid in self._records:
            found[uid] = self._records[uid]
    return found
def flush(self):
    """Synchronizes data to the underlying database file."""
    # Read-only stores ('r' flag) have nothing to write back.
    if self.flag[0] != 'r':
        with self.write_mutex:
            if hasattr(self.db, 'sync'):
                self.db.sync()
            else:
                # fall-back, close and re-open, needed for ndbm
                flag = self.flag
                if flag[0] == 'n':
                    flag = 'c' + flag[1:]  # don't clobber an existing database
                self.db.close()
                # noinspection PyArgumentList
                self.db = self.open(self.path, flag, self.mode, **self.open_kwargs)
def origin_mexico(origin):
    """Return whether the origin is one of the known Mexican origin codes.

    `origin`
        The origin to check.
    """
    mexican_origins = {
        u'CIUDADJUAREZ', u'GUADALAJARA', u'HERMOSILLO', u'MATAMOROS',
        u'MERIDA', u'MEXICO', u'MONTERREY', u'NOGALES', u'NUEVOLAREDO',
        u'TIJUANA',
    }
    return origin in mexican_origins
def _fuzzy_time_parse ( self , value ) : """Parses a fuzzy time value into a meaningful interpretation . ` value ` String value to parse ."""
value = value . lower ( ) . strip ( ) today = datetime . date . today ( ) if value in ( 'today' , 't' ) : return today else : kwargs = { } if value in ( 'y' , 'yesterday' ) : kwargs [ 'days' ] = - 1 elif value in ( 'w' , 'wk' , 'week' , 'last week' ) : kwargs [ 'days' ] = - 7 else : # match days match = re . match ( r'(\d+)\s*(d|day|days)\s*(ago)?$' , value ) if match : kwargs [ 'days' ] = - int ( match . groups ( 1 ) [ 0 ] ) else : # match weeks match = re . match ( r'(\d+)\s*(w|wk|week|weeks)\s*(ago)?$' , value ) if match : kwargs [ 'weeks' ] = - int ( match . groups ( 1 ) [ 0 ] ) if kwargs : return today + datetime . timedelta ( ** kwargs ) return None
def get_score(self, terms):
    """Compute aggregate sentiment scores for a list of terms.

    :type terms: list
    :param terms: A list of terms to be analyzed.
    :returns: dict mapping the positive, negative, polarity and
        subjectivity tags to their respective scores.
    """
    assert isinstance(terms, (list, tuple))
    scores = np.asarray([self._get_score(term) for term in terms])
    positive = np.sum(scores[scores > 0])
    negative = -np.sum(scores[scores < 0])
    # EPSILON guards against division by zero for empty/neutral input.
    polarity = (positive - negative) * 1.0 / ((positive + negative) + self.EPSILON)
    subjectivity = (positive + negative) * 1.0 / (len(scores) + self.EPSILON)
    return {
        self.TAG_POS: positive,
        self.TAG_NEG: negative,
        self.TAG_POL: polarity,
        self.TAG_SUB: subjectivity,
    }
def _should_send ( self , rebuild , success , auto_canceled , manual_canceled ) : """Return True if any state in ` self . send _ on ` meets given conditions , thus meaning that a notification mail should be sent ."""
should_send = False should_send_mapping = { self . MANUAL_SUCCESS : not rebuild and success , self . MANUAL_FAIL : not rebuild and not success , self . MANUAL_CANCELED : not rebuild and manual_canceled , self . AUTO_SUCCESS : rebuild and success , self . AUTO_FAIL : rebuild and not success , self . AUTO_CANCELED : rebuild and auto_canceled } for state in self . send_on : should_send |= should_send_mapping [ state ] return should_send
def py2round(value):
    """Round values as in Python 2, for Python 3 compatibility.

    All x.5 values are rounded away from zero. In Python 3 rounding is
    banker's rounding (round(2.5) == 2, round(3.5) == 4) and returns an
    int; Python 2 rounds half away from zero and returns a float. This
    helper reproduces the Python 2 behavior.
    """
    value = float(value)
    if value > 0:
        return float(floor(value + 0.5))
    return float(ceil(value - 0.5))
def get(self, request, bot_id, id, format=None):
    """Get list of telegram recipients of a hook
    ---
    serializer: TelegramRecipientSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    # Pure delegation to the parent list view; this override exists only to
    # carry the endpoint-specific docstring consumed by the API doc generator.
    return super(TelegramRecipientList, self).get(request, bot_id, id, format)
def get_winner(trials):
    """Get winner trial of a job.

    Scans all trials and returns a dict with the 'trial_id' and 'metric'
    of the trial maximizing the sort key ('accuracy' when the first trial
    reports it, otherwise 'episode_reward'). Returns an empty dict when no
    trial carries the metric.
    """
    winner = {}
    # TODO: sort_key should be customized here
    sort_key = "accuracy"
    if trials and len(trials) > 0:
        first_metrics = get_trial_info(trials[0])["metrics"]
        # Fall back to episode_reward when accuracy is absent/zero.
        if first_metrics and not first_metrics.get("accuracy", None):
            sort_key = "episode_reward"
    best = float("-Inf")
    for trial in trials:
        metrics = get_trial_info(trial).get("metrics", None)
        if not metrics or not metrics.get(sort_key, None):
            continue
        value = float(metrics[sort_key])
        if value > best:
            winner["trial_id"] = trial.trial_id
            winner["metric"] = sort_key + ": " + str(value)
            best = value
    return winner
def create(name='local'):
    """Creates a new KVStore.

    Single-machine types: ``local`` (gradients copied to CPU memory and
    weights updated there) and ``device`` (gradients aggregated and weights
    updated on GPUs, attempting GPU peer-to-peer communication).

    Distributed types: ``dist_sync`` (like ``local`` but batch-size is
    per-machine, so n machines with batch size b behave like ``local`` with
    batch size n*b), ``dist_device_sync`` (``dist_sync`` with the ``device``
    vs ``local`` distinction), and ``dist_async`` (asynchronous updates;
    no two updates happen on the same weight simultaneously, but order is
    not guaranteed).

    Parameters
    ----------
    name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async'}
        The type of KVStore.

    Returns
    -------
    kv : KVStore
        The created KVStore.
    """
    if not isinstance(name, string_types):
        raise TypeError('name must be a string')
    # Ask the backend to allocate the store, then wrap the raw handle.
    handle = KVStoreHandle()
    check_call(_LIB.MXKVStoreCreate(c_str(name), ctypes.byref(handle)))
    store = KVStore(handle)
    set_kvstore_handle(store.handle)
    return store
def desc(self, table):
    '''Return the description of a table.

    >>> yql.desc('geo.countries')
    '''
    return self.raw_query("desc {0}".format(table))
def verify_not_equal(self, first, second, msg=""):
    """Soft assert for inequality.

    Records the failure instead of raising, so the test run can continue.

    :param first: the value to compare against
    :param second: the value to compare with
    :param msg: (Optional) msg explaining the difference
    """
    try:
        self.assert_not_equal(first, second, msg)
    except AssertionError as e:
        # `except AssertionError, e` was Python-2-only syntax; `as` works on
        # both interpreter lines the rest of this codebase targets.
        if msg:
            m = "%s:\n%s" % (msg, str(e))
        else:
            m = str(e)
        # NOTE: keeps the existing (misspelled) attribute name used by the
        # rest of this class; renaming it would break other callers.
        self.verification_erorrs.append(m)
def add(self, recipients):
    """Add the supplied recipients to the existing list.

    :param recipients: either an address string, a Recipient instance, a
        (name, address) tuple, or a list of any of those
    :type recipients: str or Recipient or tuple or list
    :raises ValueError: for any other type
    """
    if not recipients:
        # Nothing to add; no change tracking needed.
        return
    if isinstance(recipients, str):
        self._recipients.append(Recipient(address=recipients,
                                          parent=self._parent,
                                          field=self._field))
    elif isinstance(recipients, Recipient):
        self._recipients.append(recipients)
    elif isinstance(recipients, tuple):
        name, address = recipients
        if address:
            self._recipients.append(Recipient(address=address,
                                              name=name,
                                              parent=self._parent,
                                              field=self._field))
    elif isinstance(recipients, list):
        # Recurse so each element goes through the same type dispatch.
        for recipient in recipients:
            self.add(recipient)
    else:
        raise ValueError('Recipients must be an address string, a '
                         'Recipient instance, a (name, address) '
                         'tuple or a list')
    self._track_changes()
def read_files(self, condition='*'):
    """Read specific files from archive into memory.

    If "condition" is a list of numbers, then return files which have those
    positions in infolist. If "condition" is a string, then it is treated as
    a wildcard for names of files to extract. If "condition" is a function,
    it is treated as a callback function, which accepts a RarInfo object and
    returns boolean True (extract) or False (skip). If "condition" is
    omitted, all files are returned.

    Returns list of tuples (RarInfo info, str contents)
    """
    # Normalize the wildcard/list/callable condition into a single predicate,
    # then let the implementation base class perform the actual extraction.
    checker = condition2checker(condition)
    return RarFileImplementation.read_files(self, checker)
def read_pseudo_zval(self):
    """Create pseudopotential ZVAL dictionary.

    Scans the file for the POSCAR element line and the ZVAL values, and
    stores an element -> ZVAL mapping on ``self.zval_dict``.

    Raises:
        Exception: if the file cannot be parsed into a ZVAL dict.
    """
    try:
        def poscar_line(results, match):
            # Element symbols: capital letter optionally followed by lowercase.
            line = match.group(1)
            results.poscar_line = re.findall(r'[A-Z][a-z]?', line)

        def zvals(results, match):
            raw = match.group(1)
            results.zvals = map(float, re.findall(r'-?\d+\.\d*', raw))

        search = []
        search.append([r'^.*POSCAR.*=(.*)', None, poscar_line])
        search.append([r'^\s+ZVAL.*=(.*)', None, zvals])
        micro_pyawk(self.filename, search, self)

        # Pair each element symbol with its ZVAL value.
        self.zval_dict = dict(zip(self.poscar_line, self.zvals))

        # Clean up the temporary attributes set by the awk callbacks.
        del self.poscar_line
        del self.zvals
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any parse failure still raises uniformly.
        raise Exception("ZVAL dict could not be parsed.")
def hide_intf_loopback_holder_interface_loopback_vrf_forwarding(self, **kwargs):
    """Auto Generated Code"""
    # Build the NETCONF config tree top-down: config > holder > interface
    # > loopback > {id, vrf > forwarding}.
    config = ET.Element("config")
    holder = ET.SubElement(config, "hide-intf-loopback-holder",
                           xmlns="urn:brocade.com:mgmt:brocade-intf-loopback")
    interface = ET.SubElement(holder, "interface")
    loopback = ET.SubElement(interface, "loopback")
    ET.SubElement(loopback, "id").text = kwargs.pop('id')
    vrf = ET.SubElement(loopback, "vrf")
    ET.SubElement(vrf, "forwarding").text = kwargs.pop('forwarding')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def kmer_count(seq_list, k):
    """Generate k-mer counts from a set of sequences.

    Counts occurrences of every possible k-mer in each sequence, including
    overlapping occurrences (e.g. "AAAA" contains three "AA" 2-mers).

    Args:
        seq_list (iterable): List of DNA sequences (with letters from
            {A,C,G,T}).
        k (int): K in k-mer.

    Returns:
        pandas.DataFrame: Count matrix, one row per sequence in seq_list,
        one column per possible k-mer.

    Example:
        >>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
           AA  AC  AG  AT  CA  CC  CG  CT  GA  GC  GG  GT  TA  TC  TG  TT
        0   0   1   0   1   0   0   1   0   0   0   0   1   1   0   0   1
        1   0   1   0   0   0   0   2   0   2   1   0   0   0   0   0   0
    """
    from collections import Counter

    # generate all k-mers
    all_kmers = generate_all_kmers(k)
    rows = []
    for seq in seq_list:
        # str.count misses overlapping matches ("AAAA".count("AA") == 2,
        # not 3), so slide a window over the sequence instead. This is also
        # O(len(seq)) rather than O(len(seq) * 4**k).
        window_counts = Counter(seq[i:i + k] for i in range(len(seq) - k + 1))
        rows.append([window_counts[kmer] for kmer in all_kmers])
    return pd.DataFrame(rows, columns=all_kmers)
def _extract_coeffs ( self , imt ) : """Extract dictionaries of coefficients specific to required intensity measure type ."""
C_HR = self . COEFFS_HARD_ROCK [ imt ] C_BC = self . COEFFS_BC [ imt ] C_SR = self . COEFFS_SOIL_RESPONSE [ imt ] SC = self . COEFFS_STRESS [ imt ] return C_HR , C_BC , C_SR , SC
def load(fp, **kwargs) -> BioCCollection:
    """Deserialize ``fp`` (a .read()-supporting text or binary file
    containing a JSON document) to a BioCCollection object.

    Args:
        fp: a file containing a JSON document
        **kwargs: passed through to ``json.load``

    Returns:
        BioCCollection: a collection
    """
    return parse_collection(json.load(fp, **kwargs))
def from_optional_dicts_by_key(cls, ds: Optional[dict],
                               force_snake_case=True,
                               force_cast: bool = False,
                               restrict: bool = True) -> TOption[TDict[T]]:
    """From dict of dict to optional dict of instance.

    :param ds: Dict of dict
    :param force_snake_case: Keys are transformed to snake case in order to
        be compliant with PEP8 if True
    :param force_cast: Cast forcibly if True
    :param restrict: Prohibit extra parameters if True
    :return: Dict of instance

    Usage:

        >>> from owlmixin.samples import Human
        >>> Human.from_optional_dicts_by_key(None).is_none()
        True
        >>> Human.from_optional_dicts_by_key({}).get()
        {}
    """
    # None propagates as an empty option; anything else goes through the
    # regular dict-of-dicts constructor.
    if ds is None:
        return TOption(None)
    return TOption(cls.from_dicts_by_key(ds,
                                         force_snake_case=force_snake_case,
                                         force_cast=force_cast,
                                         restrict=restrict))
def missing(self, *args, **kwds):
    """Return whether an output is considered missing or not."""
    from functools import reduce
    indexer = kwds['indexer']
    # Fall back to the generic default frequency when none was supplied.
    freq = kwds['freq'] or generic.default_freq(**indexer)
    flags = (checks.missing_any(generic.select_time(da, **indexer), freq)
             for da in args)
    # Missing if ANY of the inputs is missing.
    return reduce(np.logical_or, flags)
def process_xml_file(file_name):
    """Return a TripsProcessor by processing a TRIPS EKB XML file.

    Parameters
    ----------
    file_name : str
        Path to a TRIPS extraction knowledge base (EKB) file to be processed.

    Returns
    -------
    tp : TripsProcessor
        A TripsProcessor containing the extracted INDRA Statements in
        tp.statements.
    """
    # Read as bytes and decode explicitly so the platform default encoding
    # and newline translation cannot interfere.
    with open(file_name, 'rb') as fh:
        raw = fh.read()
    return process_xml(raw.decode('utf-8'))
def all_es_aliases(self):
    """List all aliases used in ES.

    Returns a de-duplicated list of alias names across all indices, or
    None when the aliases endpoint cannot be queried.
    """
    r = self.requests.get(self.url + "/_aliases", headers=HEADER_JSON,
                          verify=False)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        logger.warning("Something went wrong when retrieving aliases on %s.",
                       self.anonymize_url(self.index_url))
        logger.warning(ex)
        return
    # Parse the response body exactly once; the original called r.json()
    # repeatedly inside the loop, re-parsing the payload for every index.
    indices = r.json()
    aliases = set()
    for index_data in indices.values():
        aliases.update(index_data['aliases'].keys())
    return list(aliases)
def calc_qiga2_v1(self):
    """Perform the runoff concentration calculation for the second
    interflow component.

    The working equation is the analytical solution of the linear storage
    equation under the assumption of constant change in inflow during
    the simulation time step.

    Required derived parameter:
      |KI2|

    Required state sequence:
      |QIGZ2|

    Calculated state sequence:
      |QIGA2|

    Basic equation:
      :math:`QIGA2_{neu} = QIGA2_{alt} +
      (QIGZ2_{alt}-QIGA2_{alt}) \\cdot (1-exp(-KI2^{-1})) +
      (QIGZ2_{neu}-QIGZ2_{alt}) \\cdot (1-KI2\\cdot(1-exp(-KI2^{-1})))`

    Examples:

        A normal test case:

        >>> from hydpy.models.lland import *
        >>> parameterstep()
        >>> derived.ki2(0.1)
        >>> states.qigz2.old = 2.0
        >>> states.qigz2.new = 4.0
        >>> states.qiga2.old = 3.0
        >>> model.calc_qiga2_v1()
        >>> states.qiga2
        qiga2(3.800054)

        First extreme test case (zero division is circumvented):

        >>> derived.ki2(0.0)
        >>> model.calc_qiga2_v1()
        >>> states.qiga2
        qiga2(4.0)

        Second extreme test case (numerical overflow is circumvented):

        >>> derived.ki2(1e500)
        >>> model.calc_qiga2_v1()
        >>> states.qiga2
        qiga2(5.0)
    """
    der = self.parameters.derived.fastaccess
    old = self.sequences.states.fastaccess_old
    new = self.sequences.states.fastaccess_new
    if der.ki2 <= 0.:
        # Zero storage constant: outflow equals inflow instantly
        # (avoids the 1/KI2 zero division).
        new.qiga2 = new.qigz2
    elif der.ki2 > 1e200:
        # Huge storage constant: exp(-1/KI2) would be fine, but the
        # KI2*(1-exp(...)) product overflows; use the analytical limit.
        new.qiga2 = old.qiga2 + new.qigz2 - old.qigz2
    else:
        # General analytical solution of the linear storage equation.
        d_temp = (1. - modelutils.exp(-1. / der.ki2))
        new.qiga2 = (old.qiga2 +
                     (old.qigz2 - old.qiga2) * d_temp +
                     (new.qigz2 - old.qigz2) * (1. - der.ki2 * d_temp))
def get_inasafe_default_value_qsetting(qsetting, category, inasafe_field_key):
    """Helper method to get the InaSAFE default value from QSettings.

    :param qsetting: QSettings.
    :type qsetting: QSettings

    :param category: Category of the default value. It can be global or
        recent. Global means the global setting for default value. Recent
        means the last custom default value set by the user.
    :type category: str

    :param inasafe_field_key: Key for the field.
    :type inasafe_field_key: str

    :returns: Value of the inasafe_default_value.
    :rtype: float
    """
    key = 'inasafe/default_value/%s/%s' % (category, inasafe_field_key)
    stored = qsetting.value(key)
    if stored is None:
        # Nothing stored yet: global falls back to the field definition,
        # everything else falls back to zero.
        if category == GLOBAL:
            inasafe_field = definition(inasafe_field_key)
            field_default = inasafe_field.get('default_value', {})
            return field_default.get('default_value', zero_default_value)
        return zero_default_value
    try:
        return float(stored)
    except ValueError:
        # Stored value is not numeric; treat as unset.
        return zero_default_value
def org_invite(object_id, input_params=None, always_retry=True, **kwargs):
    """Invokes the /org-xxxx/invite API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2Finvite
    """
    # None sentinel instead of the original mutable default `{}`: a shared
    # default dict would leak state between calls if the request layer ever
    # mutated the payload.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/invite' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
def held(name):
    '''
    Set package in 'hold' state, meaning it will not be upgraded.

    name
        The name of the package, e.g., 'tmux'
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    state = __salt__['pkg.get_selections'](pattern=name)
    # Guard clauses: unknown package, then already-held package.
    if not state:
        ret.update(comment='Package {0} does not have a state'.format(name))
        return ret
    if salt.utils.data.is_true(state.get('hold', False)):
        ret.update(result=True,
                   comment='Package {0} is already held'.format(name))
        return ret
    if __opts__['test']:
        # Dry run: report what would happen without applying it.
        ret.update(result=None,
                   comment='Package {0} is set to be held'.format(name))
    else:
        result = __salt__['pkg.set_selections'](selection={'hold': [name]})
        ret.update(changes=result[name],
                   result=True,
                   comment='Package {0} is now being held'.format(name))
    return ret
def group_attrib(self):
    '''return a namedtuple containing all attributes attached to groups of
    which the given series is a member, for each group of which the series
    is a member (implicitly None when the series belongs to no group)'''
    member_attribs = [group.attrib for group in self.dataset.groups
                      if self in group]
    if member_attribs:
        return concat_namedtuples(*member_attribs)
def get_labels_encoder(self, data_dir):
    """Builds encoder for the given class labels.

    Args:
        data_dir: data directory

    Returns:
        An encoder for class labels.
    """
    vocab_path = os.path.join(data_dir, self.vocab_filename)
    return text_encoder.TokenTextEncoder(vocab_path)