Columns: signature (string, 29 to 44.1k characters); implementation (string, 0 to 85.2k characters)
def main ( ) : """A CLI application for performing factory calibration of an Opentrons robot Instructions : - Robot must be set up with two 300ul or 50ul single - channel pipettes installed on the right - hand and left - hand mount . - Put a GEB 300ul tip onto the pipette . - Use the arrow keys to jog the robot over slot 5 in an open space that is not an engraving or a hole . - Use the ' q ' and ' a ' keys to jog the pipette up and down respectively until the tip is just touching the deck surface , then press ' z ' . This will save the ' Z ' height . - Press ' 1 ' to automatically go to the expected location of the first calibration point . Jog the robot until the tip is actually at the point , then press ' enter ' . - Repeat with ' 2 ' and ' 3 ' . - After calibrating all three points , press the space bar to save the configuration . - Optionally , press 4,5,6 or 7 to validate the new configuration . - Press ' p ' to perform tip probe . Press the space bar to save again . - Press ' m ' to perform mount calibration . Press enter and then space bar to save again . - Press ' esc ' to exit the program ."""
prompt = input ( ">>> Warning! Running this tool backup and clear any previous " "calibration data. Proceed (y/[n])? " ) if prompt not in [ 'y' , 'Y' , 'yes' ] : print ( 'Exiting--prior configuration data not changed' ) sys . exit ( ) # Notes : # - 200ul tip is 51.7mm long when attached to a pipette # - For xyz coordinates , ( 0 , 0 , 0 ) is the lower - left corner of the robot cli = CLITool ( point_set = get_calibration_points ( ) , tip_length = 51.7 ) hardware = cli . hardware backup_configuration_and_reload ( hardware ) if not feature_flags . use_protocol_api_v2 ( ) : hardware . connect ( ) hardware . turn_on_rail_lights ( ) atexit . register ( hardware . turn_off_rail_lights ) else : hardware . set_lights ( rails = True ) cli . home ( ) # lights help the script user to see the points on the deck cli . ui_loop . run ( ) if feature_flags . use_protocol_api_v2 ( ) : hardware . set_lights ( rails = False ) print ( 'Robot config: \n' , cli . _config )
def find_root ( self ) : """Traverse parent refs to top ."""
cmd = self while cmd . parent : cmd = cmd . parent return cmd
def has_instance(name, provider=None):
    '''
    Return True if the instance is found on a provider

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.has_instance myinstance
    '''
data = get_instance ( name , provider ) if data is None : return False return True
def _expand_join ( join_definition ) : """Expand join definition to ` join ' call . : param join _ definition : join definition : return : expanded join definition"""
join_table_name = join_definition . pop ( 'table' ) join_func = getattr ( mosql_query , join_definition . pop ( 'join_type' , 'join' ) ) return join_func ( join_table_name , ** join_definition )
def success ( self ) : """Test whether the experiment has been run successfully . This will be False if the experiment hasn ' t been run , or if it ' s been run and failed ( in which case the exception will be stored in the metadata ) . : returns : ` ` True ` ` if the experiment has been run successfully"""
if self . STATUS in self . metadata ( ) . keys ( ) : return ( self . metadata ( ) ) [ self . STATUS ] else : return False
def _is_metadata_of ( group , parent_group ) : """Check if a given group is a metadata group for a given parent _ group ."""
if group . _v_depth <= parent_group . _v_depth : return False current = group while current . _v_depth > 1 : parent = current . _v_parent if parent == parent_group and current . _v_name == 'meta' : return True current = current . _v_parent return False
def use_project(self, project_id):
    """
    Creates an instance of [ProjectClient](#projectclient), providing session
    authentication.

    Parameters:
    * `project_id` - project identifier.

    Returns:
    Instance of [ProjectClient](#projectclient) with session authentication.

    Example:
    ```python
    client = Client('deform.io')
    session_client = client.auth(
        'session',
        client.user.login('email@example.com', 'password')
    )
    session_client.use_project('some-project-id')
    ```
    """
return ProjectClient ( base_uri = get_base_uri ( project = project_id , host = self . host , port = self . port , secure = self . secure , api_base_path = self . api_base_path ) , auth_header = self . auth_header , requests_session = self . requests_session , request_defaults = self . request_defaults , )
def index ( self , text , terms = None , ** kwargs ) : """Index all term pair distances . Args : text ( Text ) : The source text . terms ( list ) : Terms to index ."""
self . clear ( ) # By default , use all terms . terms = terms or text . terms . keys ( ) pairs = combinations ( terms , 2 ) count = comb ( len ( terms ) , 2 ) for t1 , t2 in bar ( pairs , expected_size = count , every = 1000 ) : # Set the Bray - Curtis distance . score = text . score_braycurtis ( t1 , t2 , ** kwargs ) self . set_pair ( t1 , t2 , score )
def on_api_socket_reconnected(self):
    """Called when the API socket has reconnected."""
    self.__is_acc_sub_push = False
    self.__last_acc_list = []
    ret, msg = RET_OK, ''
    # auto unlock trade
    if self._ctx_unlock is not None:
        password, password_md5 = self._ctx_unlock
        ret, data = self.unlock_trade(password, password_md5)
        logger.debug('auto unlock trade ret={},data={}'.format(ret, data))
        if ret != RET_OK:
            msg = data
    # re-subscribe to trading account push notifications
    if ret == RET_OK:
        self.__check_acc_sub_push()
    return ret, msg
def filter_mean ( matrix , top ) : """Filter genes in an expression matrix by mean expression . Parameters matrix : ExpMatrix The expression matrix . top : int The number of genes to retain . Returns ExpMatrix The filtered expression matrix ."""
assert isinstance ( matrix , ExpMatrix ) assert isinstance ( top , int ) if top >= matrix . p : logger . warning ( 'Gene expression filter with `top` parameter that is ' '>= the number of genes!' ) top = matrix . p a = np . argsort ( np . mean ( matrix . X , axis = 1 ) ) a = a [ : : - 1 ] sel = np . zeros ( matrix . p , dtype = np . bool_ ) sel [ a [ : top ] ] = True matrix = matrix . loc [ sel ] return matrix
def import_egg ( string ) : """Import a controller class from an egg . Uses the entry point group " appathy . controller " ."""
# Split the string into a distribution and a name dist , _sep , name = string . partition ( '#' ) return pkg_resources . load_entry_point ( dist , 'appathy.controller' , name )
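# Hedged usage sketch (not from the source): import_egg expects "<distribution>#<name>",
# where the distribution registers the name under the "appathy.controller" entry point
# group, e.g. in its setup.py:
#   entry_points={'appathy.controller': ['things = my_plugin.controllers:ThingController']}
# The distribution and entry point names below are hypothetical.
controller_cls = import_egg('my_plugin#things')  # resolves to ThingController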
def consume ( self , message ) : """Consume a JSON RPC message from the client . Args : message ( dict ) : The JSON RPC message sent by the client"""
if 'jsonrpc' not in message or message [ 'jsonrpc' ] != JSONRPC_VERSION : log . warn ( "Unknown message type %s" , message ) return if 'id' not in message : log . debug ( "Handling notification from client %s" , message ) self . _handle_notification ( message [ 'method' ] , message . get ( 'params' ) ) elif 'method' not in message : log . debug ( "Handling response from client %s" , message ) self . _handle_response ( message [ 'id' ] , message . get ( 'result' ) , message . get ( 'error' ) ) else : try : log . debug ( "Handling request from client %s" , message ) self . _handle_request ( message [ 'id' ] , message [ 'method' ] , message . get ( 'params' ) ) except JsonRpcException as e : log . exception ( "Failed to handle request %s" , message [ 'id' ] ) self . _consumer ( { 'jsonrpc' : JSONRPC_VERSION , 'id' : message [ 'id' ] , 'error' : e . to_dict ( ) } ) except Exception : # pylint : disable = broad - except log . exception ( "Failed to handle request %s" , message [ 'id' ] ) self . _consumer ( { 'jsonrpc' : JSONRPC_VERSION , 'id' : message [ 'id' ] , 'error' : JsonRpcInternalError . of ( sys . exc_info ( ) ) . to_dict ( ) } )
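# Illustrative JSON-RPC 2.0 messages showing the three dispatch paths above
# (a sketch, not taken from the source; method names are hypothetical):
notification = {'jsonrpc': '2.0', 'method': 'initialized', 'params': {}}               # no 'id' -> notification
request = {'jsonrpc': '2.0', 'id': 1, 'method': 'textDocument/hover', 'params': {}}    # 'id' and 'method' -> request
response = {'jsonrpc': '2.0', 'id': 1, 'result': {'contents': 'docs'}}                 # 'id' without 'method' -> response
# endpoint.consume(notification)  # would call _handle_notification('initialized', {})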
def screenshot ( self , filename ) : """Saves a screenshot of the current element to a PNG image file . Returns False if there is any IOError , else returns True . Use full paths in your filename . : Args : - filename : The full path you wish to save your screenshot to . This should end with a ` . png ` extension . : Usage : element . screenshot ( ' / Screenshots / foo . png ' )"""
if not filename . lower ( ) . endswith ( '.png' ) : warnings . warn ( "name used for saved screenshot does not match file " "type. It should end with a `.png` extension" , UserWarning ) png = self . screenshot_as_png try : with open ( filename , 'wb' ) as f : f . write ( png ) except IOError : return False finally : del png return True
def ack ( self , msg ) : """Called when a MESSAGE has been received . Override this method to handle received messages . This function will generate an acknowledge message for the given message and transaction ( if present ) ."""
message_id = msg [ 'headers' ] [ 'message-id' ] subscription = msg [ 'headers' ] [ 'subscription' ] transaction_id = None if 'transaction-id' in msg [ 'headers' ] : transaction_id = msg [ 'headers' ] [ 'transaction-id' ] # print " acknowledging message id < % s > . " % message _ id return ack ( message_id , subscription , transaction_id )
def move(self, dst):
    """Move this file/folder to ``dst``."""
    shutil.move(self, dst)
    self = PathStr(dst).join(self.basename())
    return self
def get_rule ( name = 'all' ) : '''. . versionadded : : 2015.5.0 Display all matching rules as specified by name Args : name ( Optional [ str ] ) : The full name of the rule . ` ` all ` ` will return all rules . Default is ` ` all ` ` Returns : dict : A dictionary of all rules or rules that match the name exactly Raises : CommandExecutionError : If the command fails CLI Example : . . code - block : : bash salt ' * ' firewall . get _ rule ' MyAppPort ' '''
cmd = [ 'netsh' , 'advfirewall' , 'firewall' , 'show' , 'rule' , 'name={0}' . format ( name ) ] ret = __salt__ [ 'cmd.run_all' ] ( cmd , python_shell = False , ignore_retcode = True ) if ret [ 'retcode' ] != 0 : raise CommandExecutionError ( ret [ 'stdout' ] ) return { name : ret [ 'stdout' ] }
def load_uncached(location, use_json=None):
    """
    Return data at either a file location or at the raw version of a URL,
    or raise an exception.

    A file location either contains no colons like /usr/tom/test.txt, or a single
    character and a colon like C:/WINDOWS/STUFF. A URL location is anything that's
    not a file location.

    If the URL ends in .json, .yml or .yaml and `use_json != False`, or
    `use_json == True`, convert the data from YAML or JSON.
    """
if not whitelist . is_file ( location ) : r = requests . get ( raw . raw ( location ) ) if not r . ok : raise ValueError ( 'Couldn\'t read %s with code %s:\n%s' % ( location , r . status_code , r . text ) ) data = r . text else : try : f = os . path . realpath ( os . path . abspath ( os . path . expanduser ( location ) ) ) data = open ( f ) . read ( ) except Exception as e : e . args = ( 'There was an error reading the file' , location , f ) + e . args raise if use_json is None : use_json = any ( location . endswith ( s ) for s in SUFFIXES ) if not use_json : return data try : return yaml . load ( data ) except Exception as e : e . args = ( 'There was a JSON error in the file' , location ) + e . args raise
def validate ( self , value , model = None , context = None ) : """Validate Perform value validation against validation settings and return error object . : param value : list , value to check : param model : parent model being validated : param context : object or None , validation context : return : shiftschema . result . Error"""
invalid = [ item for item in value if item not in self . choices ] if len ( invalid ) : return Error ( self . invalid_multichoice , dict ( items = ', ' . join ( invalid ) ) ) # success otherwise return Error ( )
def topic_search(text, corpus=doc_topic_vectors, pcaer=pcaer, corpus_text=corpus):
    """Search for the most relevant document."""
    tokens = tokenize(text, vocabulary=corpus.columns)
    tfidf_vector_query = np.array(tfidfer.transform([' '.join(tokens)]).todense())[0]
    # transform() returns a 2-D array; take the first row so the Series below is 1-D
    topic_vector_query = pcaer.transform([tfidf_vector_query])[0]
    query_series = pd.Series(topic_vector_query, index=corpus.columns)
    return corpus_text[query_series.dot(corpus.T).values.argmax()]
def list_train_dirs(dir_: str, recursive: bool, all_: bool, long: bool, verbose: bool) -> None:
    """
    List training dirs contained in the given dir with options and outputs similar
    to the regular `ls` command. The function is accessible through the cxflow CLI
    `cxflow ls`.

    :param dir_: dir to be listed
    :param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
    :param all_: include train dirs with no epochs done (--all option)
    :param long: list more details including model name, model and dataset class, age,
                 duration and epochs done (--long option)
    :param verbose: print more verbose output with list of additional artifacts and
                    training config, applicable only when a single train dir is listed
                    (--verbose option)
    """
if verbose : long = True if dir_ == CXF_DEFAULT_LOG_DIR and not path . exists ( CXF_DEFAULT_LOG_DIR ) : print ( 'The default log directory `{}` does not exist.\n' 'Consider specifying the directory to be listed as an argument.' . format ( CXF_DEFAULT_LOG_DIR ) ) quit ( 1 ) if not path . exists ( dir_ ) : print ( 'Specified dir `{}` does not exist' . format ( dir_ ) ) quit ( 1 ) all_trainings = _ls_print_listing ( dir_ , recursive , all_ , long ) if long and len ( all_trainings ) > 1 : if not recursive : print ( ) _ls_print_summary ( all_trainings ) if verbose and len ( all_trainings ) == 1 : if not recursive : print ( ) _ls_print_verbose ( all_trainings [ 0 ] )
def _cas_3 ( self ) : '''Latitude overlap ( 2 images ) .'''
lonc = self . _format_lon ( self . lonm ) latc_top = self . _format_lat ( self . latM ) latc_bot = self . _format_lat ( self . latm ) img_name_top = self . _format_name_map ( lonc , latc_top ) print ( img_name_top ) img_top = BinaryTable ( img_name_top , self . path_pdsfiles ) print ( self . lonm , self . lonM , float ( img_top . MINIMUM_LATITUDE ) , self . latM ) X_top , Y_top , Z_top = img_top . extract_grid ( self . lonm , self . lonM , float ( img_top . MINIMUM_LATITUDE ) , self . latM ) img_name_bottom = self . _format_name_map ( lonc , latc_bot ) print ( img_name_bottom ) img_bottom = BinaryTable ( img_name_bottom , self . path_pdsfiles ) X_bottom , Y_bottom , Z_bottom = img_bottom . extract_grid ( self . lonm , self . lonM , self . latm , float ( img_bottom . MAXIMUM_LATITUDE ) ) X_new = np . vstack ( ( X_top , X_bottom ) ) Y_new = np . vstack ( ( Y_top , Y_bottom ) ) Z_new = np . vstack ( ( Z_top , Z_bottom ) ) return X_new , Y_new , Z_new
def weight_function ( results , args = None , return_weights = False ) : """The default weight function utilized by : class : ` DynamicSampler ` . Zipped parameters are passed to the function via : data : ` args ` . Assigns each point a weight based on a weighted average of the posterior and evidence information content : : weight = pfrac * pweight + ( 1 . - pfrac ) * zweight where ` pfrac ` is the fractional importance placed on the posterior , the evidence weight ` zweight ` is based on the estimated remaining posterior mass , and the posterior weight ` pweight ` is the sample ' s importance weight . Returns a set of log - likelihood bounds set by the earliest / latest samples where ` weight > maxfrac * max ( weight ) ` , with additional left / right padding based on ` pad ` . Parameters results : : class : ` Results ` instance : class : ` Results ` instance . args : dictionary of keyword arguments , optional Arguments used to set the log - likelihood bounds used for sampling , as described above . Default values are ` pfrac = 0.8 ` , ` maxfrac = 0.8 ` , and ` pad = 1 ` . return _ weights : bool , optional Whether to return the individual weights ( and their components ) used to compute the log - likelihood bounds . Default is ` False ` . Returns logl _ bounds : tuple with shape ( 2 , ) Log - likelihood bounds ` ( logl _ min , logl _ max ) ` determined by the weights . weights : tuple with shape ( 3 , ) , optional The individual weights ` ( pweight , zweight , weight ) ` used to determine ` logl _ bounds ` ."""
# Initialize hyperparameters . if args is None : args = dict ( { } ) pfrac = args . get ( 'pfrac' , 0.8 ) if not 0. <= pfrac <= 1. : raise ValueError ( "The provided `pfrac` {0} is not between 0. and 1." . format ( pfrac ) ) maxfrac = args . get ( 'maxfrac' , 0.8 ) if not 0. <= maxfrac <= 1. : raise ValueError ( "The provided `maxfrac` {0} is not between 0. and 1." . format ( maxfrac ) ) lpad = args . get ( 'pad' , 1 ) if lpad < 0 : raise ValueError ( "`lpad` {0} is less than zero." . format ( lpad ) ) # Derive evidence weights . logz = results . logz # final ln ( evidence ) logz_remain = results . logl [ - 1 ] + results . logvol [ - 1 ] # remainder logz_tot = np . logaddexp ( logz [ - 1 ] , logz_remain ) # estimated upper bound lzones = np . ones_like ( logz ) logzin = misc . logsumexp ( [ lzones * logz_tot , logz ] , axis = 0 , b = [ lzones , - lzones ] ) # ln ( remaining evidence ) logzweight = logzin - np . log ( results . samples_n ) # ln ( evidence weight ) logzweight -= misc . logsumexp ( logzweight ) # normalize zweight = np . exp ( logzweight ) # convert to linear scale # Derive posterior weights . pweight = np . exp ( results . logwt - results . logz [ - 1 ] ) # importance weight pweight /= sum ( pweight ) # normalize # Compute combined weights . weight = ( 1. - pfrac ) * zweight + pfrac * pweight # Compute logl bounds nsamps = len ( logz ) bounds = np . arange ( nsamps ) [ weight > maxfrac * max ( weight ) ] bounds = ( min ( bounds ) - lpad , min ( max ( bounds ) + lpad , nsamps - 1 ) ) if bounds [ 0 ] < 0 : logl_min = - np . inf else : logl_min = results . logl [ bounds [ 0 ] ] logl_max = results . logl [ bounds [ 1 ] ] if return_weights : return ( logl_min , logl_max ) , ( pweight , zweight , weight ) else : return ( logl_min , logl_max )
def _convert_json_response_to_entity ( response , property_resolver , require_encryption , key_encryption_key , key_resolver ) : ''': param bool require _ encryption : If set , will enforce that the retrieved entity is encrypted and decrypt it . : param object key _ encryption _ key : The user - provided key - encryption - key . Must implement the following methods : unwrap _ key ( key , algorithm ) - - returns the unwrapped form of the specified symmetric key using the string - specified algorithm . get _ kid ( ) - - returns a string key id for this key - encryption - key . : param function key _ resolver ( kid ) : The user - provided key resolver . Uses the kid string to return a key - encryption - key implementing the interface defined above .'''
if response is None or response . body is None : return None root = loads ( response . body . decode ( 'utf-8' ) ) return _decrypt_and_deserialize_entity ( root , property_resolver , require_encryption , key_encryption_key , key_resolver )
def divide ( lhs , rhs ) : """Returns element - wise division of the input arrays with broadcasting . Equivalent to ` ` lhs / rhs ` ` and ` ` mx . nd . broadcast _ div ( lhs , rhs ) ` ` . . . note : : If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape . Parameters lhs : scalar or mxnet . ndarray . array First array in division . rhs : scalar or mxnet . ndarray . array Second array in division . The arrays to be divided . If ` ` lhs . shape ! = rhs . shape ` ` , they must be broadcastable to a common shape . Returns NDArray The element - wise division of the input arrays . Examples > > > x = mx . nd . ones ( ( 2,3 ) ) * 6 > > > y = mx . nd . ones ( ( 2,1 ) ) * 2 > > > x . asnumpy ( ) array ( [ [ 6 . , 6 . , 6 . ] , [ 6 . , 6 . , 6 . ] ] , dtype = float32) > > > y . asnumpy ( ) array ( [ [ 2 . ] , [ 2 . ] ] , dtype = float32) > > > x / 2 < NDArray 2x3 @ cpu ( 0 ) > > > > ( x / 3 ) . asnumpy ( ) array ( [ [ 2 . , 2 . , 2 . ] , [ 2 . , 2 . , 2 . ] ] , dtype = float32) > > > ( x / y ) . asnumpy ( ) array ( [ [ 3 . , 3 . , 3 . ] , [ 3 . , 3 . , 3 . ] ] , dtype = float32) > > > mx . nd . divide ( x , y ) . asnumpy ( ) array ( [ [ 3 . , 3 . , 3 . ] , [ 3 . , 3 . , 3 . ] ] , dtype = float32)"""
# pylint : disable = no - member , protected - access return _ufunc_helper ( lhs , rhs , op . broadcast_div , operator . truediv , _internal . _div_scalar , _internal . _rdiv_scalar )
def default ( self , obj ) : '''The required ` ` default ` ` method for ` ` JSONEncoder ` ` subclasses . Args : obj ( obj ) : The object to encode . Anything not specifically handled in this method is passed on to the default system JSON encoder .'''
from . . model import Model from . . colors import Color from . has_props import HasProps # array types - - use force _ list here , only binary # encoding CDS columns for now if pd and isinstance ( obj , ( pd . Series , pd . Index ) ) : return transform_series ( obj , force_list = True ) elif isinstance ( obj , np . ndarray ) : return transform_array ( obj , force_list = True ) elif isinstance ( obj , collections . deque ) : return list ( map ( self . default , obj ) ) elif isinstance ( obj , Model ) : return obj . ref elif isinstance ( obj , HasProps ) : return obj . properties_with_values ( include_defaults = False ) elif isinstance ( obj , Color ) : return obj . to_css ( ) else : return self . transform_python_types ( obj )
def forward ( self , inputs : torch . Tensor ) -> Dict [ str , torch . Tensor ] : # pylint : disable = arguments - differ """Compute context insensitive token embeddings for ELMo representations . Parameters inputs : ` ` torch . Tensor ` ` Shape ` ` ( batch _ size , sequence _ length , 50 ) ` ` of character ids representing the current batch . Returns Dict with keys : ` ` ' token _ embedding ' ` ` : ` ` torch . Tensor ` ` Shape ` ` ( batch _ size , sequence _ length + 2 , embedding _ dim ) ` ` tensor with context insensitive token representations . ` ` ' mask ' ` ` : ` ` torch . Tensor ` ` Shape ` ` ( batch _ size , sequence _ length + 2 ) ` ` long tensor with sequence mask ."""
# Add BOS / EOS mask = ( ( inputs > 0 ) . long ( ) . sum ( dim = - 1 ) > 0 ) . long ( ) character_ids_with_bos_eos , mask_with_bos_eos = add_sentence_boundary_token_ids ( inputs , mask , self . _beginning_of_sentence_characters , self . _end_of_sentence_characters ) # the character id embedding max_chars_per_token = self . _options [ 'char_cnn' ] [ 'max_characters_per_token' ] # ( batch _ size * sequence _ length , max _ chars _ per _ token , embed _ dim ) character_embedding = torch . nn . functional . embedding ( character_ids_with_bos_eos . view ( - 1 , max_chars_per_token ) , self . _char_embedding_weights ) # run convolutions cnn_options = self . _options [ 'char_cnn' ] if cnn_options [ 'activation' ] == 'tanh' : activation = torch . tanh elif cnn_options [ 'activation' ] == 'relu' : activation = torch . nn . functional . relu else : raise ConfigurationError ( "Unknown activation" ) # ( batch _ size * sequence _ length , embed _ dim , max _ chars _ per _ token ) character_embedding = torch . transpose ( character_embedding , 1 , 2 ) convs = [ ] for i in range ( len ( self . _convolutions ) ) : conv = getattr ( self , 'char_conv_{}' . format ( i ) ) convolved = conv ( character_embedding ) # ( batch _ size * sequence _ length , n _ filters for this width ) convolved , _ = torch . max ( convolved , dim = - 1 ) convolved = activation ( convolved ) convs . append ( convolved ) # ( batch _ size * sequence _ length , n _ filters ) token_embedding = torch . cat ( convs , dim = - 1 ) # apply the highway layers ( batch _ size * sequence _ length , n _ filters ) token_embedding = self . _highways ( token_embedding ) # final projection ( batch _ size * sequence _ length , embedding _ dim ) token_embedding = self . _projection ( token_embedding ) # reshape to ( batch _ size , sequence _ length , embedding _ dim ) batch_size , sequence_length , _ = character_ids_with_bos_eos . size ( ) return { 'mask' : mask_with_bos_eos , 'token_embedding' : token_embedding . view ( batch_size , sequence_length , - 1 ) }
def to_link ( self ) : """Returns a link for the resource ."""
link_type = self . link_type if self . type == 'Link' else self . type return Link ( { 'sys' : { 'linkType' : link_type , 'id' : self . sys . get ( 'id' ) } } , client = self . _client )
def dispatch ( self , receiver ) : '''Dispatch handling of this event to a receiver . This method will invoke ` ` receiver . _ columns _ streamed ` ` if it exists .'''
super ( ColumnsStreamedEvent , self ) . dispatch ( receiver ) if hasattr ( receiver , '_columns_streamed' ) : receiver . _columns_streamed ( self )
def _create_borderchoice_combo ( self ) : """Create border choice combo box"""
choices = [ c [ 0 ] for c in self . border_toggles ] self . borderchoice_combo = _widgets . BorderEditChoice ( self , choices = choices , style = wx . CB_READONLY , size = ( 50 , - 1 ) ) self . borderchoice_combo . SetToolTipString ( _ ( u"Choose borders for which attributes are changed" ) ) self . borderstate = self . border_toggles [ 0 ] [ 0 ] self . AddControl ( self . borderchoice_combo ) self . Bind ( wx . EVT_COMBOBOX , self . OnBorderChoice , self . borderchoice_combo ) self . borderchoice_combo . SetValue ( "AllBorders" )
def create_container(self, name, image, hostname='dfis', networkmode='bridge',
                     ports=None, volumes=None, env=None, restartpolicy='no',
                     restartretrycount='2', command=""):
    """testing

    :param name:
    :param image:
    :param hostname:
    :param networkmode: `class`:`str`, host | bridge
    :param ports: `class`:`list`,
        [{'type': 'tcp', 'publicport': 8080, 'privateport': 80, 'ip': '0.0.0.0'}]
    :param volumes: `class`:`list`,
        [{"containervolume": "/app-conf", "hostvolume": "/opt/app/app-conf"}]
    :param env: `class`:`list`, ["var=value", "var1=value1"]
    :param restartpolicy: `class`:`str`, always | on-failure | no (default)
    :param restartretrycount: only used when restartpolicy is on-failure
    :param command:
    :return:
    """
restartpolicy = restartpolicy . lower ( ) repository , image_name , version = utils . parse_image_name ( image ) image = '{0}/{1}:{2}' . format ( DOCKER_NEG , image_name , version ) body = dict ( name = name , image = image , hostname = hostname , networkmode = networkmode , ports = ports or [ ] , volumes = volumes or [ ] , env = env or [ ] , restartpolicy = restartpolicy , command = command ) if restartpolicy == 'on-failure' : body [ 'restartretrycount' ] = restartretrycount return "/dockerapi/v2/containers" , body
def is_number ( dtype ) : """Return True is datatype dtype is a number kind"""
return is_float ( dtype ) or ( 'int' in dtype . name ) or ( 'long' in dtype . name ) or ( 'short' in dtype . name )
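# Small usage sketch (assumes numpy dtypes and that the is_float helper used above
# returns True only for floating-point dtypes):
# import numpy as np
# is_number(np.dtype('int64'))    # True  ('int' appears in the dtype name)
# is_number(np.dtype('float32'))  # True  (via is_float)
# is_number(np.dtype('bool'))     # False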
def decode_once ( estimator , problem_name , hparams , infer_input_fn , decode_hp , decode_to_file , output_dir , log_results = True , checkpoint_path = None ) : """Decodes once . Args : estimator : tf . estimator . Estimator instance . Used to generate encoded predictions . problem _ name : str . Name of problem . hparams : HParams instance . HParams for model training . infer _ input _ fn : zero - arg function . Input function for estimator . decode _ hp : HParams instance . See decode _ hparams ( ) above . decode _ to _ file : str . Prefix for filenames . Used to generated filenames to which decoded predictions are written . output _ dir : str . Output directory . Only used for writing images . log _ results : bool . If False , return encoded predictions without any further processing . checkpoint _ path : str . Path to load model checkpoint from . If unspecified , Estimator ' s default is used . Returns : If decode _ hp . decode _ in _ memory is True : List of dicts , one per example . Values are either numpy arrays or decoded strings . If decode _ hp . decode _ in _ memory is False : An empty list ."""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn, checkpoint_path=checkpoint_path)
if not log_results:
    return list(predictions)
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
    output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
    parts = output_filepath.split(".")
    parts[-1] = "targets"
    target_filepath = ".".join(parts)
    parts[-1] = "inputs"
    input_filepath = ".".join(parts)
    output_file = tf.gfile.Open(output_filepath, "w")
    target_file = tf.gfile.Open(target_filepath, "w")
    input_file = tf.gfile.Open(input_filepath, "w")
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = "inputs" in problem_hparams.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary["targets"]
num_eval_samples = 0
# all_outputs[i][j] = (input: str, output: str, target: str). Input,
# decoded output, and target strings for example i, beam rank j.
all_outputs = []
for num_predictions, prediction in enumerate(predictions):
    num_eval_samples += 1
    num_predictions += 1
    inputs = prediction.get("inputs")
    targets = prediction.get("targets")
    outputs = prediction.get("outputs")
    # Log predictions
    decoded_outputs = []  # [(str, str, str)]. See all_outputs above.
    if decode_hp.decode_in_memory:
        all_outputs.append(decoded_outputs)
    decoded_scores = []
    if decode_hp.return_beams:
        output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
        scores = None
        if "scores" in prediction:
            scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
        for i, beam in enumerate(output_beams):
            tf.logging.info("BEAM %d:" % i)
            score = scores and scores[i]
            decoded = log_decode_results(
                inputs, beam, problem_name, num_predictions, inputs_vocab,
                targets_vocab, save_images=decode_hp.save_images,
                output_dir=output_dir, identity_output=decode_hp.identity_output,
                targets=targets, log_results=log_results)
            decoded_outputs.append(decoded)
            if decode_hp.write_beam_scores:
                decoded_scores.append(score)
    else:
        decoded = log_decode_results(
            inputs, outputs, problem_name, num_predictions, inputs_vocab,
            targets_vocab, save_images=decode_hp.save_images,
            output_dir=output_dir, identity_output=decode_hp.identity_output,
            targets=targets, log_results=log_results,
            skip_eos_postprocess=decode_hp.skip_eos_postprocess)
        decoded_outputs.append(decoded)
    # Write out predictions if decode_to_file passed
    if decode_to_file:
        for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
            # Skip if all padding
            if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input):
                continue
            beam_score_str = ""
            if decode_hp.write_beam_scores:
                beam_score_str = "\t%.2f" % decoded_scores[i]
            output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
            target_file.write(str(d_target) + decode_hp.delimiter)
            input_file.write(str(d_input) + decode_hp.delimiter)
    if (decode_hp.num_samples >= 0 and
            num_predictions >= decode_hp.num_samples):
        break
mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE, value=num_eval_samples,
                             hparams=hparams)
if decode_to_file:
    output_file.close()
    target_file.close()
    input_file.close()
return all_outputs
def get_url_and_revision_from_pip_url(cls, pip_url):
    """
    Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.

    That's required because although they use SSH they sometimes don't work
    with an ssh:// scheme (e.g. GitHub). But we need a scheme for parsing.
    Hence we remove it again afterwards and return it as a stub.

    The manpage for git-clone(1) refers to this as the "scp-like syntax".
    """
if '://' not in pip_url : assert 'file:' not in pip_url pip_url = pip_url . replace ( 'git+' , 'git+ssh://' ) url , rev = super ( GitRepo , cls ) . get_url_and_revision_from_pip_url ( pip_url ) url = url . replace ( 'ssh://' , '' ) elif 'github.com:' in pip_url : raise exc . LibVCSException ( "Repo %s is malformatted, please use the convention %s for" "ssh / private GitHub repositories." % ( pip_url , "git+https://github.com/username/repo.git" ) ) else : url , rev = super ( GitRepo , cls ) . get_url_and_revision_from_pip_url ( pip_url ) return url , rev
def get_service_version(self, service_id, mode='production', version='default'):
    '''
    get_service_version(self, service_id, mode='production', version='default') | Get details of a specific version of a given service.

    Opereto will try to fetch the requested service version. If not found, it will
    return the default production version. The "actual_version" field of the returned
    JSON indicates what version of the service is returned. If the actual version is
    null, it means that this service does not have any version at all. To make it
    operational, you will have to import or upload a default version.

    :Parameters:
    * *service_id* (`string`) -- Identifier of an existing service
    * *mode* (`string`) -- development/production. Default is production
    * *version* (`string`) -- version of the service ("default" is the default)

    :return: json service version details

    :Example:
    .. code-block:: python

        service_version = opereto_client.get_service_version(serviceId, mode='development', version='111')
    '''
return self . _call_rest_api ( 'get' , '/services/' + service_id + '/' + mode + '/' + version , error = 'Failed to fetch service information' )
def pp_hex ( raw , reverse = True ) : """Return a pretty - printed ( hex style ) version of a binary string . Args : raw ( bytes ) : any sequence of bytes reverse ( bool ) : True if output should be in reverse order . Returns : Hex string corresponding to input byte sequence ."""
if not reverse : return '' . join ( [ '{:02x}' . format ( v ) for v in bytearray ( raw ) ] ) return '' . join ( reversed ( [ '{:02x}' . format ( v ) for v in bytearray ( raw ) ] ) )
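# Example (pure Python, follows directly from the implementation above): each byte
# becomes two hex digits, and with reverse=True the byte order is flipped.
assert pp_hex(b'\x12\x34\x56') == '563412'
assert pp_hex(b'\x12\x34\x56', reverse=False) == '123456'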
def wb020 ( self , value = None ) : """Corresponds to IDD Field ` wb020 ` Wet - bulb temperature corresponding to 02.0 % annual cumulative frequency of occurrence Args : value ( float ) : value for IDD Field ` wb020 ` Unit : C if ` value ` is None it will not be checked against the specification and is assumed to be a missing value Raises : ValueError : if ` value ` is not a valid value"""
if value is not None : try : value = float ( value ) except ValueError : raise ValueError ( 'value {} need to be of type float ' 'for field `wb020`' . format ( value ) ) self . _wb020 = value
def modulation_type ( self , value : int ) : """0 - " ASK " , 1 - " FSK " , 2 - " PSK " , 3 - " APSK ( QAM ) " : param value : : return :"""
if self . __modulation_type != value : self . __modulation_type = value self . _qad = None self . modulation_type_changed . emit ( self . __modulation_type ) if not self . block_protocol_update : self . protocol_needs_update . emit ( )
def columns_exist ( inspect_dataset ) : """This function will take a dataset and add expectations that each column present exists . Args : inspect _ dataset ( great _ expectations . dataset ) : The dataset to inspect and to which to add expectations ."""
# Attempt to get column names . For pandas , columns is just a list of strings if not hasattr ( inspect_dataset , "columns" ) : warnings . warn ( "No columns list found in dataset; no autoinspection performed." ) return elif isinstance ( inspect_dataset . columns [ 0 ] , string_types ) : columns = inspect_dataset . columns elif isinstance ( inspect_dataset . columns [ 0 ] , dict ) and "name" in inspect_dataset . columns [ 0 ] : columns = [ col [ 'name' ] for col in inspect_dataset . columns ] else : raise AutoInspectError ( "Unable to determine column names for this dataset." ) create_multiple_expectations ( inspect_dataset , columns , "expect_column_to_exist" )
def raw_to_bv ( self ) : """A counterpart to FP . raw _ to _ bv - does nothing and returns itself ."""
if self . symbolic : return BVS ( next ( iter ( self . variables ) ) . replace ( self . STRING_TYPE_IDENTIFIER , self . GENERATED_BVS_IDENTIFIER ) , self . length ) else : return BVV ( ord ( self . args [ 0 ] ) , self . length )
def __profile_definition ( self , pipeline_name ) : """Prepare the profiles extraction from a specific profile"""
pipe = self . flsh . Operations [ pipeline_name ] x_st = pipe . GetUserVariable ( PROFILE_LENGTH_ST ) . Variable ( ) x_non_st = pipe . GetUserVariable ( PROFILE_LENGTH_NON_ST ) . Variable ( ) timesteps = pipe . GetUserVariable ( PROFILE_TIME ) . Variable ( ) self . pipes [ pipeline_name ] = { 'grid' : x_st , 'non_st_grid' : x_non_st , 'timesteps' : timesteps , 'data' : { } } return pipe
def fill ( self , postf_un_ops : str ) : """Insert : * math styles * other styles * unary prefix operators without brackets * defaults"""
for op , dic in self . ops . items ( ) : if 'postf' not in dic : dic [ 'postf' ] = self . postf self . ops = OrderedDict ( self . styles . spec ( postf_un_ops ) + self . other_styles . spec ( postf_un_ops ) + self . pref_un_greedy . spec ( ) + list ( self . ops . items ( ) ) ) for op , dic in self . ops . items ( ) : dic [ 'postf' ] = re . compile ( dic [ 'postf' ] ) self . regex = _search_regex ( self . ops , self . regex_pat )
def get_grading_status ( section_id , act_as = None ) : """Return a restclients . models . gradepage . GradePageStatus object on the given course"""
url = "{}/{}" . format ( url_prefix , quote ( section_id ) ) headers = { } if act_as is not None : headers [ "X-UW-Act-as" ] = act_as response = get_resource ( url , headers ) return _object_from_json ( url , response )
def is_base_tuple ( type_str ) : """A predicate that matches a tuple type with no array dimension list ."""
try : abi_type = grammar . parse ( type_str ) except exceptions . ParseError : return False return isinstance ( abi_type , grammar . TupleType ) and abi_type . arrlist is None
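# Hedged examples (assuming `grammar` is an ABI type-string grammar along the lines of
# eth_abi's, where an array suffix populates `arrlist`):
# is_base_tuple('(uint256,bool)')     # True  - tuple with no array dimensions
# is_base_tuple('(uint256,bool)[2]')  # False - array of tuples
# is_base_tuple('uint256')            # False - not a tuple type at all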
def run ( self , * coros ) : """Pass in all the coroutines you want to run , it will wrap each one in a task , run it and wait for the result . Return a list with all results , this is returned in the same order coros are passed in ."""
tasks = [asyncio.ensure_future(coro()) for coro in coros]
self.loop.run_until_complete(asyncio.wait(tasks))
# iterate over `tasks` rather than the unordered `done` set so results come back
# in the same order the coroutines were passed in, as documented
return [t.result() for t in tasks]
def connection_from_host ( self , host , port = None , scheme = 'http' , pool_kwargs = None ) : """Get a : class : ` ConnectionPool ` based on the host , port , and scheme . If ` ` port ` ` isn ' t given , it will be derived from the ` ` scheme ` ` using ` ` urllib3 . connectionpool . port _ by _ scheme ` ` . If ` ` pool _ kwargs ` ` is provided , it is merged with the instance ' s ` ` connection _ pool _ kw ` ` variable and used to create the new connection pool , if one is needed ."""
if not host : raise LocationValueError ( "No host specified." ) request_context = self . _merge_pool_kwargs ( pool_kwargs ) request_context [ 'scheme' ] = scheme or 'http' if not port : port = port_by_scheme . get ( request_context [ 'scheme' ] . lower ( ) , 80 ) request_context [ 'port' ] = port request_context [ 'host' ] = host return self . connection_from_context ( request_context )
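# Hedged usage sketch (assuming this method lives on a urllib3 PoolManager-style class):
# manager = PoolManager(num_pools=10)
# pool = manager.connection_from_host('example.com', scheme='https')   # port derived as 443
# same_pool = manager.connection_from_host('example.com', port=443, scheme='https')
# assert pool is same_pool   # pools are keyed by (scheme, host, port)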
def setup_tmpltbank_without_frames(workflow, output_dir, tags=None,
                                   independent_ifos=False, psd_files=None):
    '''
    Setup CBC workflow to use a template bank (or banks) that are generated in the
    workflow, but do not use the data to estimate a PSD, and therefore do not vary
    over the duration of the workflow. This can either generate one bank that is
    valid for all ifos at all times, or multiple banks that are valid only for a
    single ifo at all times (one bank per ifo).

    Parameters
    ----------
    workflow : pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    output_dir : path string
        The directory where the template bank outputs will be placed.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.
    independent_ifos : Boolean, optional (default=False)
        If given this will produce one template bank per ifo. If not given there
        will be one template bank to cover all ifos.
    psd_files : pycbc.workflow.core.FileList
        The file list containing predefined PSDs, if provided.

    Returns
    -------
    tmplt_banks : pycbc.workflow.core.FileList
        The FileList holding the details of the template bank(s).
    '''
if tags is None : tags = [ ] cp = workflow . cp # Need to get the exe to figure out what sections are analysed , what is # discarded etc . This should * not * be hardcoded , so using a new executable # will require a bit of effort here . . . . ifos = workflow . ifos fullSegment = workflow . analysis_time tmplt_bank_exe = os . path . basename ( cp . get ( 'executables' , 'tmpltbank' ) ) # Can not use lalapps _ template bank with this if tmplt_bank_exe == 'lalapps_tmpltbank' : errMsg = "Lalapps_tmpltbank cannot be used to generate template banks " errMsg += "without using frames. Try another code." raise ValueError ( errMsg ) # Select the appropriate class exe_instance = select_tmpltbank_class ( tmplt_bank_exe ) tmplt_banks = FileList ( [ ] ) # Make the distinction between one bank for all ifos and one bank per ifo if independent_ifos : ifoList = [ ifo for ifo in ifos ] else : ifoList = [ [ ifo for ifo in ifos ] ] # Check for the write _ psd flag if cp . has_option_tags ( "workflow-tmpltbank" , "tmpltbank-write-psd-file" , tags ) : exe_instance . write_psd = True else : exe_instance . write_psd = False for ifo in ifoList : job_instance = exe_instance ( workflow . cp , 'tmpltbank' , ifo = ifo , out_dir = output_dir , tags = tags , psd_files = psd_files ) node = job_instance . create_nodata_node ( fullSegment ) workflow . add_node ( node ) tmplt_banks += node . output_files return tmplt_banks
def variable_summaries ( var ) : """Attach a lot of summaries to a Tensor ( for TensorBoard visualization ) ."""
with tf . name_scope ( 'summaries' ) : mean = tf . reduce_mean ( var ) tf . summary . scalar ( 'mean' , mean ) with tf . name_scope ( 'stddev' ) : stddev = tf . sqrt ( tf . reduce_mean ( tf . square ( var - mean ) ) ) tf . summary . scalar ( 'stddev' , stddev ) tf . summary . scalar ( 'max' , tf . reduce_max ( var ) ) tf . summary . scalar ( 'min' , tf . reduce_min ( var ) ) tf . summary . histogram ( 'histogram' , var )
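# Hedged usage sketch (TF1-style API, matching the tf.summary calls above):
# with tf.name_scope('weights'):
#     weights = tf.Variable(tf.truncated_normal([784, 10], stddev=0.1))
#     variable_summaries(weights)   # adds mean/stddev/min/max/histogram summaries
# merged = tf.summary.merge_all()   # collect them for a summary FileWriter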
def get_between_times ( self , t1 , t2 , target = None ) : """Query for OPUS data between times t1 and t2. Parameters t1 , t2 : datetime . datetime , strings Start and end time for the query . If type is datetime , will be converted to isoformat string . If type is string already , it needs to be in an accepted international format for time strings . target : str Potential target for the observation query . Most likely will reduce the amount of data matching the query a lot . Returns None , but set ' s state of the object to have new query results stored in self . obsids ."""
try : # checking if times have isoformat ( ) method ( datetimes have ) t1 = t1 . isoformat ( ) t2 = t2 . isoformat ( ) except AttributeError : # if not , should already be a string , so do nothing . pass myquery = self . _get_time_query ( t1 , t2 ) if target is not None : myquery [ "target" ] = target self . create_files_request ( myquery , fmt = "json" ) self . unpack_json_response ( )
def list_blobs ( storage_conn = None , ** kwargs ) : '''. . versionadded : : 2015.8.0 List blobs associated with the container'''
if not storage_conn : storage_conn = get_storage_conn ( opts = kwargs ) if 'container' not in kwargs : raise SaltSystemExit ( code = 42 , msg = 'An storage container name must be specified as "container"' ) data = storage_conn . list_blobs ( container_name = kwargs [ 'container' ] , prefix = kwargs . get ( 'prefix' , None ) , marker = kwargs . get ( 'marker' , None ) , maxresults = kwargs . get ( 'maxresults' , None ) , include = kwargs . get ( 'include' , None ) , delimiter = kwargs . get ( 'delimiter' , None ) , ) ret = { } for item in data . blobs : ret [ item . name ] = object_to_dict ( item ) return ret
def Decode ( self , attribute , value ) : """Decode the value to the required type ."""
required_type = self . _attribute_types . get ( attribute , "bytes" ) if required_type == "integer" : return rdf_structs . SignedVarintReader ( value , 0 ) [ 0 ] elif required_type == "unsigned_integer" : return rdf_structs . VarintReader ( value , 0 ) [ 0 ] elif required_type == "string" : if isinstance ( value , bytes ) : return value . decode ( "utf-8" ) else : return utils . SmartUnicode ( value ) else : return value
def setColorAlpha(self, fixed=None, proportional=None):
    """
    Change the alpha of the current :py:class:`Color`.

    :param fixed: Set the absolute 0-1 value of the alpha.
    :param proportional: Set the relative value of the alpha (e.g. if the current
        alpha is 0.8, a proportional value of 0.5 will set the final value to 0.4).
    :rtype: Nothing.
    """
    if fixed is not None:
        self.color.set_alpha(fixed)
    elif proportional is not None:
        self.color.set_alpha(self.color.get_alpha() * proportional)
def add_to_collection ( self , request , pk = None ) : """Add Entity to a collection ."""
entity = self . get_object ( ) # TODO use ` self . get _ ids ` ( and elsewhere ) . Backwards # incompatible because raised error ' s response contains # ` ` detail ` ` instead of ` ` error ` ` ) . if 'ids' not in request . data : return Response ( { "error" : "`ids` parameter is required" } , status = status . HTTP_400_BAD_REQUEST ) for collection_id in request . data [ 'ids' ] : self . _get_collection_for_user ( collection_id , request . user ) for collection_id in request . data [ 'ids' ] : entity . collections . add ( collection_id ) collection = Collection . objects . get ( pk = collection_id ) for data in entity . data . all ( ) : collection . data . add ( data ) return Response ( )
def setup_sort_column ( widget , column = 0 , attribute = None , model = None ) : """* model * is the : class : ` TreeModelSort ` to act on . Defaults to what is displayed . Pass this if you sort before filtering . * widget * is a clickable : class : ` TreeViewColumn ` . * column * is an integer addressing the column in * model * that holds your objects . * attribute * is a string naming an object attribute to display . Defaults to the name of * widget * ."""
if not attribute : attribute = widget . get_name ( ) if attribute is None : raise TypeError ( "Column not named" ) widget . connect ( 'clicked' , _clicked , column , attribute , model )
def namespace ( self , value ) : """Update the query ' s namespace . : type value : str"""
if not isinstance ( value , str ) : raise ValueError ( "Namespace must be a string" ) self . _namespace = value
def simple_merge ( kls , skeletons ) : """Simple concatenation of skeletons into one object without adding edges between them ."""
if len ( skeletons ) == 0 : return PrecomputedSkeleton ( ) if type ( skeletons [ 0 ] ) is np . ndarray : skeletons = [ skeletons ] ct = 0 edges = [ ] for skel in skeletons : edge = skel . edges + ct edges . append ( edge ) ct += skel . vertices . shape [ 0 ] return PrecomputedSkeleton ( vertices = np . concatenate ( [ skel . vertices for skel in skeletons ] , axis = 0 ) , edges = np . concatenate ( edges , axis = 0 ) , radii = np . concatenate ( [ skel . radii for skel in skeletons ] , axis = 0 ) , vertex_types = np . concatenate ( [ skel . vertex_types for skel in skeletons ] , axis = 0 ) , segid = skeletons [ 0 ] . id , )
def moving_average ( iterable , n ) : """From Python collections module documentation moving _ average ( [ 40 , 30 , 50 , 46 , 39 , 44 ] ) - - > 40.0 42.0 45.0 43.0"""
it = iter ( iterable ) d = collections . deque ( itertools . islice ( it , n - 1 ) ) d . appendleft ( 0 ) s = sum ( d ) for elem in it : s += elem - d . popleft ( ) d . append ( elem ) yield s / float ( n )
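# The docstring example written out as a runnable check:
assert list(moving_average([40, 30, 50, 46, 39, 44], 3)) == [40.0, 42.0, 45.0, 43.0]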
def ParseOptions ( cls , options , configuration_object ) : """Parses and validates options . Args : options ( argparse . Namespace ) : parser options . configuration _ object ( CLITool ) : object to be configured by the argument helper . Raises : BadConfigObject : when the configuration object is of the wrong type . BadConfigOption : when a configuration parameter fails validation ."""
if not isinstance ( configuration_object , tools . CLITool ) : raise errors . BadConfigObject ( 'Configuration object is not an instance of CLITool' ) number_of_extraction_workers = cls . _ParseNumericOption ( options , 'workers' , default_value = 0 ) if number_of_extraction_workers < 0 : raise errors . BadConfigOption ( 'Invalid number of extraction workers value cannot be negative.' ) worker_memory_limit = cls . _ParseNumericOption ( options , 'worker_memory_limit' ) if worker_memory_limit and worker_memory_limit < 0 : raise errors . BadConfigOption ( 'Invalid worker memory limit value cannot be negative.' ) setattr ( configuration_object , '_number_of_extraction_workers' , number_of_extraction_workers ) setattr ( configuration_object , '_worker_memory_limit' , worker_memory_limit )
def G_calc ( item1 , item2 ) : """Calculate G - measure & G - mean . : param item1 : PPV or TPR or TNR : type item1 : float : param item2 : PPV or TPR or TNR : type item2 : float : return : G - measure or G - mean as float"""
try : result = math . sqrt ( item1 * item2 ) return result except Exception : return "None"
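# Quick numeric check (plain math, no assumptions beyond the code above):
# with precision 0.9 and recall 0.8, the G-measure is sqrt(0.9 * 0.8) ~= 0.8485
# G_calc(0.9, 0.8)   # -> 0.848528...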
def p_x_boolx ( self , t ) : """expression : unop expression | expression binop expression"""
# todo : ply exposes precedence with % prec , use it . if len ( t ) == 4 : t [ 0 ] = bin_priority ( t [ 2 ] , t [ 1 ] , t [ 3 ] ) elif len ( t ) == 3 : t [ 0 ] = un_priority ( t [ 1 ] , t [ 2 ] ) else : raise NotImplementedError ( 'unk_len' , len ( t ) ) # pragma : no cover
def set_map_alpha ( alpha ) : """Alpha color of the map tiles : param alpha : int between 0 and 255 . 0 is completely dark , 255 is full brightness"""
if alpha < 0 or alpha > 255 : raise Exception ( 'invalid alpha ' + str ( alpha ) ) _global_config . map_alpha = alpha
def sync_config_tasks ( self ) : """Performs the first sync of a list of tasks , often defined in the config file ."""
tasks_by_hash = { _hash_task ( t ) : t for t in self . config_tasks } for task in self . all_tasks : if tasks_by_hash . get ( task [ "hash" ] ) : del tasks_by_hash [ task [ "hash" ] ] else : self . collection . remove ( { "_id" : task [ "_id" ] } ) log . debug ( "Scheduler: deleted %s" % task [ "hash" ] ) # What remains are the new ones to be inserted for h , task in tasks_by_hash . items ( ) : task [ "hash" ] = h task [ "datelastqueued" ] = datetime . datetime . fromtimestamp ( 0 ) if task . get ( "dailytime" ) : # Because MongoDB can store datetimes but not times , # we add today ' s date to the dailytime . # The date part will be discarded in check ( ) task [ "dailytime" ] = datetime . datetime . combine ( datetime . datetime . utcnow ( ) , task [ "dailytime" ] ) task [ "interval" ] = 3600 * 24 # Avoid to queue task in check ( ) if today dailytime is already passed if datetime . datetime . utcnow ( ) . time ( ) > task [ "dailytime" ] . time ( ) : task [ "datelastqueued" ] = datetime . datetime . utcnow ( ) self . collection . find_one_and_update ( { "hash" : task [ "hash" ] } , { "$set" : task } , upsert = True ) log . debug ( "Scheduler: added %s" % task [ "hash" ] )
def t384(args):
    """
    %prog t384

    Print out a table converting between 96-well and 384-well plates.
    """
p = OptionParser ( t384 . __doc__ ) opts , args = p . parse_args ( args ) plate , splate = get_plate ( ) fw = sys . stdout for i in plate : for j , p in enumerate ( i ) : if j != 0 : fw . write ( '|' ) fw . write ( p ) fw . write ( '\n' )
def hessian ( self , x , y , coeffs , beta , center_x = 0 , center_y = 0 ) : """returns Hessian matrix of function d ^ 2f / dx ^ 2 , d ^ f / dy ^ 2 , d ^ 2 / dxdy"""
shapelets = self . _createShapelet ( coeffs ) n_order = self . _get_num_n ( len ( coeffs ) ) dxx_shapelets = self . _dxx_shapelets ( shapelets , beta ) dyy_shapelets = self . _dyy_shapelets ( shapelets , beta ) dxy_shapelets = self . _dxy_shapelets ( shapelets , beta ) n = len ( np . atleast_1d ( x ) ) if n <= 1 : f_xx = self . _shapeletOutput ( x , y , beta , dxx_shapelets , precalc = False ) f_yy = self . _shapeletOutput ( x , y , beta , dyy_shapelets , precalc = False ) f_xy = self . _shapeletOutput ( x , y , beta , dxy_shapelets , precalc = False ) else : H_x , H_y = self . pre_calc ( x , y , beta , n_order + 2 , center_x , center_y ) f_xx = self . _shapeletOutput ( H_x , H_y , beta , dxx_shapelets ) f_yy = self . _shapeletOutput ( H_x , H_y , beta , dyy_shapelets ) f_xy = self . _shapeletOutput ( H_x , H_y , beta , dxy_shapelets ) return f_xx , f_yy , f_xy
def build ( self , client , nobuild = False , usecache = True , pull = False ) : """Drives the build of the final image - get the list of steps and execute them . Args : client ( docker . Client ) : docker client object that will build the image nobuild ( bool ) : just create dockerfiles , don ' t actually build the image usecache ( bool ) : use docker cache , or rebuild everything from scratch ? pull ( bool ) : try to pull new versions of repository images ?"""
if not nobuild : self . update_source_images ( client , usecache = usecache , pull = pull ) width = utils . get_console_width ( ) cprint ( '\n' + '=' * width , color = 'white' , attrs = [ 'bold' ] ) line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % ( self . targetname , self . imagename , self . steps [ - 1 ] . sourcefile ) cprint ( _centered ( line , width ) , color = 'blue' , attrs = [ 'bold' ] ) for istep , step in enumerate ( self . steps ) : print ( colored ( '* Step' , 'blue' ) , colored ( '%d/%d' % ( istep + 1 , len ( self . steps ) ) , 'blue' , attrs = [ 'bold' ] ) , colored ( 'for image' , color = 'blue' ) , colored ( self . imagename , color = 'blue' , attrs = [ 'bold' ] ) ) if not nobuild : if step . bust_cache : stackkey = self . _get_stack_key ( istep ) if stackkey in _rebuilt : step . bust_cache = False step . build ( client , usecache = usecache ) print ( colored ( "* Created intermediate image" , 'green' ) , colored ( step . buildname , 'green' , attrs = [ 'bold' ] ) , end = '\n\n' ) if step . bust_cache : _rebuilt . add ( stackkey ) finalimage = step . buildname if not nobuild : self . finalizenames ( client , finalimage ) line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)' % ( self . targetname , self . imagename , self . steps [ - 1 ] . sourcefile ) cprint ( _centered ( line , width ) , color = 'green' , attrs = [ 'bold' ] ) cprint ( '=' * width , color = 'white' , attrs = [ 'bold' ] , end = '\n\n' )
def find_first ( data , what ) : '''Search for ` ` what ` ` in the iterable ` ` data ` ` and return the index of the first match . Return ` ` None ` ` if no match found .'''
for i , line in enumerate ( data ) : if contains ( line , what ) : return i return None
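# Hedged example (assuming the `contains` helper above does a substring-style match):
# find_first(['alpha', 'beta', 'gamma'], 'bet')   # -> 1
# find_first(['alpha', 'beta', 'gamma'], 'zzz')   # -> None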
def _add_impact_severity ( self , variant_obj , gemini_variant ) : """Add the impact severity for the most severe consequence Args : variant _ obj ( puzzle . models . Variant ) gemini _ variant ( GeminiQueryRow )"""
gemini_impact = gemini_variant [ 'impact_severity' ] if gemini_impact == 'MED' : gemini_impact = 'MEDIUM' variant_obj . impact_severity = gemini_impact
def _narrow_unichr ( code_point ) : """Retrieves the unicode character representing any given code point , in a way that won ' t break on narrow builds . This is necessary because the built - in unichr function will fail for ordinals above 0xFFFF on narrow builds ( UCS2 ) ; ordinals above 0xFFFF would require recalculating and combining surrogate pairs . This avoids that by retrieving the unicode character that was initially read . Args : code _ point ( int | CodePoint ) : An int or a subclass of int that contains the unicode character representing its code point in an attribute named ' char ' ."""
try : if len ( code_point . char ) > 1 : return code_point . char except AttributeError : pass return six . unichr ( code_point )
def p_if_statement_2(self, p):
    """if_statement : IF LPAREN expr RPAREN statement ELSE statement"""
p [ 0 ] = self . asttypes . If ( predicate = p [ 3 ] , consequent = p [ 5 ] , alternative = p [ 7 ] ) p [ 0 ] . setpos ( p )
def _format_exception ( err , is_failure , stdout = None , stderr = None ) : """Converts a sys . exc _ info ( ) - style tuple of values into a string ."""
exctype , value , tb = err # Skip test runner traceback levels while tb and _is_relevant_tb_level ( tb ) : tb = tb . tb_next if is_failure : # Skip assert * ( ) traceback levels length = _count_relevant_tb_levels ( tb ) msgLines = traceback . format_exception ( exctype , value , tb , length ) else : msgLines = traceback . format_exception ( exctype , value , tb ) encoding = locale . getpreferredencoding ( ) msgLines = [ _decode ( line , encoding ) for line in msgLines ] if stdout : if not stdout . endswith ( '\n' ) : stdout += '\n' msgLines . append ( STDOUT_LINE % stdout ) if stderr : if not stderr . endswith ( '\n' ) : stderr += '\n' msgLines . append ( STDERR_LINE % stderr ) return '' . join ( msgLines )
def json ( content , request = None , response = None , ensure_ascii = False , ** kwargs ) : """JSON ( Javascript Serialized Object Notation )"""
if hasattr ( content , 'read' ) : return content if isinstance ( content , tuple ) and getattr ( content , '_fields' , None ) : content = { field : getattr ( content , field ) for field in content . _fields } return json_converter . dumps ( content , default = _json_converter , ensure_ascii = ensure_ascii , ** kwargs ) . encode ( 'utf8' )
def configure_owner ( self , owner = 'www-data' ) : """Shortcut to set process owner data . : param str | unicode owner : Sets user and group . Default : ` ` www - data ` ` ."""
if owner is not None : self . main_process . set_owner_params ( uid = owner , gid = owner ) return self
def get_command ( name ) : '''Get the command function * name *'''
command = global_commands_table . get ( name . lower ( ) ) if not command : raise CommandNotFound ( name ) return command
def normalize ( code ) : """Normalize language codes to ISO 639-2 . If all conversions fail , return the ` code ` as it was given . Args : code ( str ) : Language / country code . Returns : str : ISO 639-2 code ."""
if len ( code ) == 3 : return code normalized = translate ( code ) if normalized : return normalized country = countries . get ( code , None ) if country : return country . alpha3 . lower ( ) return code
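A usage sketch; the exact results depend on the module-level translate() mapping and countries table, which are not part of this excerpt:

normalize('eng')   # already three letters -> returned unchanged
normalize('en')    # looked up via translate(), e.g. -> 'eng'
normalize('zz')    # no translation and no country match -> 'zz' returned as given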
def set_perms ( path , grant_perms = None , deny_perms = None , inheritance = True , reset = False ) : '''Set permissions for the given path Args : path ( str ) : The full path to the directory . grant _ perms ( dict ) : A dictionary containing the user / group and the basic permissions to grant , ie : ` ` { ' user ' : { ' perms ' : ' basic _ permission ' } } ` ` . You can also set the ` ` applies _ to ` ` setting here . The default for ` ` applies _ to ` ` is ` ` this _ folder _ subfolders _ files ` ` . Specify another ` ` applies _ to ` ` setting like this : . . code - block : : yaml { ' user ' : { ' perms ' : ' full _ control ' , ' applies _ to ' : ' this _ folder ' } } To set advanced permissions use a list for the ` ` perms ` ` parameter , ie : . . code - block : : yaml { ' user ' : { ' perms ' : [ ' read _ attributes ' , ' read _ ea ' ] , ' applies _ to ' : ' this _ folder ' } } To see a list of available attributes and applies to settings see the documentation for salt . utils . win _ dacl . A value of ` ` None ` ` will make no changes to the ` ` grant ` ` portion of the DACL . Default is ` ` None ` ` . deny _ perms ( dict ) : A dictionary containing the user / group and permissions to deny along with the ` ` applies _ to ` ` setting . Use the same format used for the ` ` grant _ perms ` ` parameter . Remember , deny permissions supersede grant permissions . A value of ` ` None ` ` will make no changes to the ` ` deny ` ` portion of the DACL . Default is ` ` None ` ` . inheritance ( bool ) : If ` ` True ` ` the object will inherit permissions from the parent , if ` ` False ` ` , inheritance will be disabled . Inheritance setting will not apply to parent directories if they must be created . Default is ` ` True ` ` . reset ( bool ) : If ` ` True ` ` the existing DACL will be cleared and replaced with the settings defined in this function . If ` ` False ` ` , new entries will be appended to the existing DACL . Default is ` ` False ` ` . . . versionadded : : 2018.3.0 Returns : bool : True if successful Raises : CommandExecutionError : If unsuccessful CLI Example : . . code - block : : bash # To grant the ' Users ' group ' read & execute ' permissions . salt ' * ' file . set _ perms C : \\ Temp \\ " { ' Users ' : { ' perms ' : ' read _ execute ' } } " # Locally using salt call salt - call file . set _ perms C : \\ Temp \\ " { ' Users ' : { ' perms ' : ' read _ execute ' , ' applies _ to ' : ' this _ folder _ only ' } } " # Specify advanced attributes with a list salt ' * ' file . set _ perms C : \\ Temp \\ " { ' jsnuffy ' : { ' perms ' : [ ' read _ attributes ' , ' read _ ea ' ] , ' applies _ to ' : ' this _ folder _ only ' } } "'''
return __utils__ [ 'dacl.set_perms' ] ( obj_name = path , obj_type = 'file' , grant_perms = grant_perms , deny_perms = deny_perms , inheritance = inheritance , reset = reset )
def mute ( self , mute ) : """Mute receiver"""
try : if ( mute and self . _mute == STATE_OFF ) : self . send_command ( "MUTE_TOGGLE" ) self . _mute = STATE_ON return True elif not mute and self . _mute == STATE_ON : self . send_command ( "MUTE_TOGGLE" ) self . _mute = STATE_OFF return True except requests . exceptions . RequestException : _LOGGER . error ( "Connection error: mute command not sent." ) return False
def answer ( request ) : """Save the answer . GET parameters : html : turn on the HTML version of the API BODY JSON in the following format : " answer " : # answer , - - for one answer " answers " : [ # answer , # answer , # answer . . . ] - - for multiple answers answer = { " answer _ class " : str , - - class of answer to save ( e . g . , flashcard _ answer ) " response _ time " : int , - - response time in milliseconds " meta " : " str " - - optional information " time _ gap " : int - - waiting time in frontend in seconds . . . - - other fields depending on answer type ( see from _ json method of Django model class )"""
if request . method == 'GET' : return render ( request , 'models_answer.html' , { } , help_text = answer . __doc__ ) elif request . method == 'POST' : practice_filter = get_filter ( request ) practice_context = PracticeContext . objects . from_content ( practice_filter ) saved_answers = _save_answers ( request , practice_context , True ) return render_json ( request , saved_answers , status = 200 , template = 'models_answer.html' ) else : return HttpResponseBadRequest ( "method {} is not allowed" . format ( request . method ) )
def get_rst_relation_root_nodes ( docgraph , data = True , rst_namespace = 'rst' ) : """yield all nodes that dominate one or more RST relations in the given document graph ( in no particular order ) . Parameters docgraph : DiscourseDocumentGraph a document graph which contains RST annotations data : bool If True ( default ) , yields ( node ID , relation name , list of tokens ) tuples . If False , yields just node IDs . rst _ namespace : str The namespace that the RST annotations use ( default : rst ) Yields relations : str or ( str , str , list of str ) tuples If data = False , this will just yield node IDs of the nodes that directly dominate an RST relation . If data = True , this yields tuples of the form : ( node ID , relation name , list of tokens that this relation spans ) ."""
rel_attr = rst_namespace + ':rel_name' for node_id , node_attrs in docgraph . nodes_iter ( data = True ) : if rel_attr in node_attrs and node_attrs [ rel_attr ] != 'span' : yield ( node_id , node_attrs [ rel_attr ] , get_span ( docgraph , node_id ) ) if data else ( node_id )
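A usage sketch; docgraph is assumed to be a DiscourseDocumentGraph carrying RST annotations under the default 'rst' namespace:

for node_id, rel_name, tokens in get_rst_relation_root_nodes(docgraph):
    print(node_id, rel_name, len(tokens))   # e.g. a group node, its relation name, its span length

root_ids = list(get_rst_relation_root_nodes(docgraph, data=False))   # node IDs only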
def read ( self , ** keys ) : """Read the data from disk and return as a numpy array"""
if self . is_scalar : data = self . fitshdu . read_column ( self . columns , ** keys ) else : c = keys . get ( 'columns' , None ) if c is None : keys [ 'columns' ] = self . columns data = self . fitshdu . read ( ** keys ) return data
def gene_counts ( self ) : """Returns number of elements overlapping each gene name . Expects the derived class ( VariantCollection or EffectCollection ) to have an implementation of groupby _ gene _ name ."""
return { gene_name : len ( group ) for ( gene_name , group ) in self . groupby_gene_name ( ) . items ( ) }
async def run ( self ) : """Runs the agent . Answer to the requests made by the Backend . May raise an asyncio . CancelledError , in which case the agent should clean itself and restart completely ."""
self . _logger . info ( "Agent started" ) self . __backend_socket . connect ( self . __backend_addr ) # Tell the backend we are up and have ` concurrency ` threads available self . _logger . info ( "Saying hello to the backend" ) await ZMQUtils . send ( self . __backend_socket , AgentHello ( self . __friendly_name , self . __concurrency , self . environments ) ) self . __last_ping = time . time ( ) run_listen = self . _loop . create_task ( self . __run_listen ( ) ) self . _loop . call_later ( 1 , self . _create_safe_task , self . __check_last_ping ( run_listen ) ) await run_listen
def _get_table_info ( self ) : """Inspect the base to get field names"""
self . fields = [ ] self . field_info = { } self . cursor . execute ( 'PRAGMA table_info (%s)' % self . name ) for field_info in self . cursor . fetchall ( ) : fname = field_info [ 1 ] . encode ( 'utf-8' ) self . fields . append ( fname ) ftype = field_info [ 2 ] . encode ( 'utf-8' ) info = { 'type' : ftype } # can be null ? info [ 'NOT NULL' ] = field_info [ 3 ] != 0 # default value default = field_info [ 4 ] if isinstance ( default , unicode ) : default = guess_default_fmt ( default ) info [ 'DEFAULT' ] = default self . field_info [ fname ] = info
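A stand-alone illustration of the PRAGMA used above, written against the plain sqlite3 module; PRAGMA table_info yields rows of (cid, name, type, notnull, dflt_value, pk):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE person (id INTEGER PRIMARY KEY, name TEXT NOT NULL, age INTEGER DEFAULT 0)')
for cid, name, ftype, notnull, default, pk in conn.execute('PRAGMA table_info (person)'):
    print(name, ftype, bool(notnull), default)
# id INTEGER False None
# name TEXT True None
# age INTEGER False 0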
def cmd ( send , msg , args ) : """Pesters somebody . Syntax : { command } < nick > < message >"""
if not msg or len ( msg . split ( ) ) < 2 : send ( "Pester needs at least two arguments." ) return match = re . match ( '(%s+) (.*)' % args [ 'config' ] [ 'core' ] [ 'nickregex' ] , msg ) if match : message = match . group ( 2 ) + " " send ( '%s: %s' % ( match . group ( 1 ) , message * 3 ) ) else : send ( "Invalid Syntax." )
def check_version ( mod , required ) : """Require minimum version of module using ` ` _ _ version _ _ ` ` member ."""
vers = tuple ( int ( v ) for v in mod . __version__ . split ( '.' ) [ : 3 ] ) if vers < required : req = '.' . join ( str ( v ) for v in required ) raise ImproperlyConfigured ( "Module \"%s\" version (%s) must be >= %s." % ( mod . __name__ , mod . __version__ , req ) )
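A usage sketch; ImproperlyConfigured is imported elsewhere in the module (in the original context it is the Django exception of that name), and the module's __version__ is assumed to be a plain dotted release string:

import numpy
check_version(numpy, (1, 0, 0))     # passes silently for any recent release
check_version(numpy, (99, 0, 0))    # raises ImproperlyConfigured naming the required version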
def vote ( session , nick , pid , response ) : """Votes on a poll ."""
if not response : return "You have to vote something!" if response == "n" or response == "nay" : response = "no" elif response == "y" or response == "aye" : response = "yes" poll = get_open_poll ( session , pid ) if poll is None : return "That poll doesn't exist or isn't active. Use !poll list to see valid polls" old_vote = get_response ( session , pid , nick ) if old_vote is None : session . add ( Poll_responses ( pid = pid , response = response , voter = nick ) ) return "%s voted %s." % ( nick , response ) else : if response == old_vote . response : return "You've already voted %s." % response else : msg = "%s changed their vote from %s to %s." % ( nick , old_vote . response , response ) old_vote . response = response return msg
def record ( self , action = None , method = None , timeout = None , finish_on_key = None , max_length = None , play_beep = None , trim = None , recording_status_callback = None , recording_status_callback_method = None , recording_status_callback_event = None , transcribe = None , transcribe_callback = None , ** kwargs ) : """Create a < Record > element : param action : Action URL : param method : Action URL method : param timeout : Timeout to begin recording : param finish _ on _ key : Finish recording on key : param max _ length : Max time to record in seconds : param play _ beep : Play beep : param trim : Trim the recording : param recording _ status _ callback : Status callback URL : param recording _ status _ callback _ method : Status callback URL method : param recording _ status _ callback _ event : Recording status callback events : param transcribe : Transcribe the recording : param transcribe _ callback : Transcribe callback URL : param kwargs : additional attributes : returns : < Record > element"""
return self . nest ( Record ( action = action , method = method , timeout = timeout , finish_on_key = finish_on_key , max_length = max_length , play_beep = play_beep , trim = trim , recording_status_callback = recording_status_callback , recording_status_callback_method = recording_status_callback_method , recording_status_callback_event = recording_status_callback_event , transcribe = transcribe , transcribe_callback = transcribe_callback , ** kwargs ) )
def get_details ( self ) : """Overrides the method in Failure so as to add a few details about the wrapped function and outcome"""
if isinstance ( self . validation_outcome , Exception ) : if isinstance ( self . validation_outcome , Failure ) : # do not say again what was the value , it is already mentioned inside : ) end_str = '' else : end_str = ' for value [{value}]' . format ( value = self . wrong_value ) contents = 'Function [{wrapped}] raised [{exception}: {details}]{end}.' '' . format ( wrapped = get_callable_name ( self . wrapped_func ) , exception = type ( self . validation_outcome ) . __name__ , details = self . validation_outcome , end = end_str ) else : contents = 'Function [{wrapped}] returned [{result}] for value [{value}].' '' . format ( wrapped = get_callable_name ( self . wrapped_func ) , result = self . validation_outcome , value = self . wrong_value ) return contents
def zip_patterns ( self , patterns ) : """Append suffix to patterns in dictionary if we are in a conditional FP tree ."""
suffix = self . root . value if suffix is not None : # We are in a conditional tree . new_patterns = { } for key in patterns . keys ( ) : new_patterns [ tuple ( sorted ( list ( key ) + [ suffix ] ) ) ] = patterns [ key ] return new_patterns return patterns
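The suffix-appending step can be illustrated stand-alone, independent of the tree class; this is a minimal sketch of the same transformation:

def append_suffix(patterns, suffix):
    # Extend every frequent itemset with the conditional-tree suffix, keeping keys sorted.
    return {tuple(sorted(list(key) + [suffix])): support
            for key, support in patterns.items()}

append_suffix({('b',): 3, ('b', 'c'): 2}, 'a')
# -> {('a', 'b'): 3, ('a', 'b', 'c'): 2}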
def database_url ( self ) : """Returns a " database URL " for use with DJ - Database - URL and similar libraries ."""
return 'postgres://{}:{}@{}/{}' . format ( self . user , self . password , self . name , self . database )
def Q_weir_V_Shen ( h1 , angle = 90 ) : r'''Calculates the flow rate across a V - notch ( triangular ) weir from the height of the liquid above the tip of the notch , and with the angle of the notch . Most of these type of weir are 90 degrees . Model from [ 1 ] _ as reproduced in [ 2 ] _ . Flow rate is given by : . . math : : Q = C \ tan \ left ( \ frac { \ theta } { 2 } \ right ) \ sqrt { g } ( h _ 1 + k ) ^ { 2.5} Parameters h1 : float Height of the fluid above the notch [ m ] angle : float , optional Angle of the notch [ degrees ] Returns Q : float Volumetric flow rate across the weir [ m ^ 3 / s ] Notes angles = [ 20 , 40 , 60 , 80 , 100] Cs = [ 0.59 , 0.58 , 0.575 , 0.575 , 0.58] k = [ 0.0028 , 0.0017 , 0.0012 , 0.001 , 0.001] The following limits apply to the use of this equation : h1 > = 0.05 m h2 > 0.45 m h1 / h2 < = 0.4 m b > 0.9 m . . math : : \ frac { h _ 1 } { b } \ tan \ left ( \ frac { \ theta } { 2 } \ right ) < 2 Flows are lower than obtained by the curves at http : / / www . lmnoeng . com / Weirs / vweir . php . Examples > > > Q _ weir _ V _ Shen ( 0.6 , angle = 45) 0.21071725775478228 References . . [ 1 ] Shen , John . " Discharge Characteristics of Triangular - Notch Thin - Plate Weirs : Studies of Flow to Water over Weirs and Dams . " USGS Numbered Series . Water Supply Paper . U . S . Geological Survey : U . S . G . P . O . , 1981 . . [ 2 ] Blevins , Robert D . Applied Fluid Dynamics Handbook . New York , N . Y . : Van Nostrand Reinhold Co . , 1984.'''
C = interp ( angle , angles_Shen , Cs_Shen ) k = interp ( angle , angles_Shen , k_Shen ) return C * tan ( radians ( angle ) / 2 ) * g ** 0.5 * ( h1 + k ) ** 2.5
def deseq2_size_factors ( counts , meta , design ) : """Get size factors for counts using DESeq2. Parameters counts : pandas . DataFrame Counts to pass to DESeq2. meta : pandas . DataFrame Pandas dataframe whose index matches the columns of counts . This is passed to DESeq2 ' s colData . design : str Design like ~ subject _ id that will be passed to DESeq2 . The design variables should match columns in meta . Returns sf : pandas . Series Series whose index matches the columns of counts and whose values are the size factors from DESeq2 . Divide each column by its size factor to obtain normalized counts ."""
import rpy2 . robjects as r from rpy2 . robjects import pandas2ri pandas2ri . activate ( ) r . r ( 'suppressMessages(library(DESeq2))' ) r . globalenv [ 'counts' ] = counts r . globalenv [ 'meta' ] = meta r . r ( 'dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, ' 'design={})' . format ( design ) ) r . r ( 'dds = estimateSizeFactors(dds)' ) r . r ( 'sf = sizeFactors(dds)' ) sf = r . globalenv [ 'sf' ] return pd . Series ( sf , index = counts . columns )
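A usage sketch (requires rpy2 and an R installation with DESeq2; the tiny counts matrix and the intercept-only design below are illustrative only):

import pandas as pd

counts = pd.DataFrame({'s1': [10, 3, 5], 's2': [20, 6, 9]}, index=['g1', 'g2', 'g3'])
meta = pd.DataFrame({'condition': ['a', 'b']}, index=counts.columns)
sf = deseq2_size_factors(counts, meta, '~1')
normalized = counts / sf    # divide each column by its size factor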
def update_contact ( self , contact_id , email = None , name = None ) : """Update a current contact : param contact _ id : contact id : param email : user email : param name : user name"""
params = { } if email is not None : params [ 'email' ] = email if name is not None : params [ 'name' ] = name url = self . CONTACTS_ID_URL % contact_id connection = Connection ( self . token ) connection . set_url ( self . production , url ) connection . add_header ( 'Content-Type' , 'application/json' ) connection . add_params ( params ) return connection . patch_request ( )
def bgblack ( cls , string , auto = False ) : """Color - code entire string . : param str string : String to colorize . : param bool auto : Enable auto - color ( dark / light terminal ) . : return : Class instance for colorized string . : rtype : Color"""
return cls . colorize ( 'bgblack' , string , auto = auto )
def image_size ( self , pnmfile ) : """Get width and height of pnm file . simeon @ homebox src > pnmfile / tmp / 214-2 . png / tmp / 214-2 . png : PPM raw , 100 by 100 maxval 255"""
pout = os . popen ( self . shellsetup + self . pnmfile + ' ' + pnmfile , 'r' ) pnmfileout = pout . read ( 200 ) pout . close ( ) m = re . search ( ', (\d+) by (\d+) ' , pnmfileout ) if ( m is None ) : raise IIIFError ( text = "Bad output from pnmfile when trying to get size." ) w = int ( m . group ( 1 ) ) h = int ( m . group ( 2 ) ) # print " pnmfile output = % s " % ( pnmfileout ) # print " image size = % d , % d " % ( w , h ) return ( w , h )
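A stand-alone check of the size-parsing regular expression against the sample pnmfile output quoted in the docstring:

import re

sample = '/tmp/214-2.png: PPM raw, 100 by 100  maxval 255'
m = re.search(r', (\d+) by (\d+) ', sample)
print(int(m.group(1)), int(m.group(2)))   # 100 100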
def _get ( self , url ) : """Handles api . football - data . org requests"""
req = requests . get ( RequestHandler . BASE_URL + url , headers = self . headers ) status_code = req . status_code if status_code == requests . codes . ok : return req elif status_code == requests . codes . bad : raise APIErrorException ( 'Invalid request. Check parameters.' ) elif status_code == requests . codes . forbidden : raise APIErrorException ( 'This resource is restricted' ) elif status_code == requests . codes . not_found : raise APIErrorException ( 'This resource does not exist. Check parameters' ) elif status_code == requests . codes . too_many_requests : raise APIErrorException ( 'You have exceeded your allowed requests per minute/day' )
def parse_json ( self , req , name , field ) : """Pull a json value from the request ."""
json_data = self . _cache . get ( "json" ) if json_data is None : try : self . _cache [ "json" ] = json_data = core . parse_json ( req . body , req . charset ) except json . JSONDecodeError as e : if e . doc == "" : return core . missing else : return self . handle_invalid_json_error ( e , req ) if json_data is None : return core . missing return core . get_value ( json_data , name , field , allow_many_nested = True )
def set ( self , key , value , * , flags = None ) : """Sets the Key to the given Value Parameters : key ( str ) : Key to set value ( Payload ) : Value to set , It will be encoded by flags flags ( int ) : Flags to set with value"""
self . append ( { "Verb" : "set" , "Key" : key , "Value" : encode_value ( value , flags , base64 = True ) . decode ( "utf-8" ) , "Flags" : flags } ) return self
def set_summary ( self ) : """Parses summary and set value"""
try : self . summary = self . soup . find ( 'itunes:summary' ) . string except AttributeError : self . summary = None
def parse ( self , kv ) : """Parses key value string into dict Examples : > > > parser . parse ( ' test1 . test2 = value ' ) { ' test1 ' : { ' test2 ' : ' value ' } } > > > parser . parse ( ' test = value ' ) { ' test ' : ' value ' }"""
key , val = kv . split ( self . kv_sep , 1 ) keys = key . split ( self . keys_sep ) for k in reversed ( keys ) : val = { k : val } return val
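The nesting step can be shown stand-alone; this minimal sketch assumes kv_sep='=' and keys_sep='.' as in the docstring examples:

def nest(kv, kv_sep='=', keys_sep='.'):
    key, val = kv.split(kv_sep, 1)
    for k in reversed(key.split(keys_sep)):   # wrap from the innermost key outwards
        val = {k: val}
    return val

nest('test1.test2=value')   # -> {'test1': {'test2': 'value'}}
nest('test=value')          # -> {'test': 'value'}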
def find_iter_window ( bitstream , pattern , max_pos = None ) : """> > > pattern = list ( bytes2bit _ strings ( " B " ) ) > > > bitstream = bytes2bit _ strings ( " AAABCCC " ) > > > find _ iter _ window ( bitstream , pattern ) 24 > > > " " . join ( list ( bitstream2string ( bitstream ) ) ) ' CCC ' > > > find _ iter _ window ( bytes2bit _ strings ( " HELLO ! " ) , list ( bytes2bit _ strings ( " LO " ) ) ) 24 > > > find _ iter _ window ( bytes2bit _ strings ( " HELLO ! " ) , list ( bytes2bit _ strings ( " LO " ) ) , max _ pos = 16) Traceback ( most recent call last ) : MaxPosArraived : 17 > > > find _ iter _ window ( bytes2bit _ strings ( " HELLO ! " ) , list ( bytes2bit _ strings ( " X " ) ) ) Traceback ( most recent call last ) : PatternNotFound : 40"""
assert isinstance ( bitstream , ( collections . Iterable , types . GeneratorType ) ) assert isinstance ( pattern , ( list , tuple ) ) window_size = len ( pattern ) pos = - 1 for pos , data in enumerate ( iter_window ( bitstream , window_size ) ) : # print pos , data , pattern if data == pattern : return pos if max_pos is not None and pos > max_pos : raise MaxPosArraived ( pos ) raise PatternNotFound ( pos )
def full_research_organism ( soup ) : "research - organism list including inline tags , such as italic"
if not raw_parser . research_organism_keywords ( soup ) : return [ ] return list ( map ( node_contents_str , raw_parser . research_organism_keywords ( soup ) ) )