def topic_exists(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if an SNS topic exists.

    CLI example::

        salt myminion boto3_sns.topic_exists mytopic region=us-east-1
    '''
    topics = list_topics(region=region, key=key, keyid=keyid, profile=profile)
    return name in list(topics.values() + topics.keys())
def convert_bidirectional ( builder , layer , input_names , output_names , keras_layer ) : """Convert a bidirectional layer from keras to coreml . Currently assumes the units are LSTMs . Parameters keras _ layer : layer A keras layer object . builder : NeuralNetworkBuilder A neural network builder object ."""
input_size = keras_layer . input_shape [ - 1 ] lstm_layer = keras_layer . forward_layer if ( type ( lstm_layer ) != keras . layers . recurrent . LSTM ) : raise TypeError ( 'Bidirectional layers only supported with LSTM' ) if lstm_layer . go_backwards : raise TypeError ( ' \'go_backwards\' mode not supported with Bidirectional layers' ) output_all = keras_layer . return_sequences hidden_size = lstm_layer . output_dim # output _ size = lstm _ layer . output _ dim * 2 if lstm_layer . consume_less not in [ 'cpu' , 'gpu' ] : raise ValueError ( 'Cannot convert Keras layer with consume_less = %s' % keras_layer . consume_less ) # Keras : I C F O ; W _ x , W _ h , b # CoreML : I F O G ; W _ h and W _ x are separated # Keras has all forward weights , followed by backward in the same order W_h , W_x , b = ( [ ] , [ ] , [ ] ) if lstm_layer . consume_less == 'cpu' : W_h . append ( keras_layer . get_weights ( ) [ 1 ] . T ) W_h . append ( keras_layer . get_weights ( ) [ 7 ] . T ) W_h . append ( keras_layer . get_weights ( ) [ 10 ] . T ) W_h . append ( keras_layer . get_weights ( ) [ 4 ] . T ) W_x . append ( keras_layer . get_weights ( ) [ 0 ] . T ) W_x . append ( keras_layer . get_weights ( ) [ 6 ] . T ) W_x . append ( keras_layer . get_weights ( ) [ 9 ] . T ) W_x . append ( keras_layer . get_weights ( ) [ 3 ] . T ) b . append ( keras_layer . get_weights ( ) [ 2 ] ) b . append ( keras_layer . get_weights ( ) [ 8 ] ) b . append ( keras_layer . get_weights ( ) [ 11 ] ) b . append ( keras_layer . get_weights ( ) [ 5 ] ) else : keras_W_h = keras_layer . get_weights ( ) [ 1 ] . T W_h . append ( keras_W_h [ 0 * hidden_size : ] [ : hidden_size ] ) W_h . append ( keras_W_h [ 1 * hidden_size : ] [ : hidden_size ] ) W_h . append ( keras_W_h [ 3 * hidden_size : ] [ : hidden_size ] ) W_h . append ( keras_W_h [ 2 * hidden_size : ] [ : hidden_size ] ) keras_W_x = keras_layer . get_weights ( ) [ 0 ] . T W_x . append ( keras_W_x [ 0 * hidden_size : ] [ : hidden_size ] ) W_x . append ( keras_W_x [ 1 * hidden_size : ] [ : hidden_size ] ) W_x . append ( keras_W_x [ 3 * hidden_size : ] [ : hidden_size ] ) W_x . append ( keras_W_x [ 2 * hidden_size : ] [ : hidden_size ] ) keras_b = keras_layer . get_weights ( ) [ 2 ] b . append ( keras_b [ 0 * hidden_size : ] [ : hidden_size ] ) b . append ( keras_b [ 1 * hidden_size : ] [ : hidden_size ] ) b . append ( keras_b [ 3 * hidden_size : ] [ : hidden_size ] ) b . append ( keras_b [ 2 * hidden_size : ] [ : hidden_size ] ) W_h_back , W_x_back , b_back = ( [ ] , [ ] , [ ] ) if keras_layer . backward_layer . consume_less == 'cpu' : back_weights = keras_layer . backward_layer . get_weights ( ) W_h_back . append ( back_weights [ 1 ] . T ) W_h_back . append ( back_weights [ 7 ] . T ) W_h_back . append ( back_weights [ 10 ] . T ) W_h_back . append ( back_weights [ 4 ] . T ) W_x_back . append ( back_weights [ 0 ] . T ) W_x_back . append ( back_weights [ 6 ] . T ) W_x_back . append ( back_weights [ 9 ] . T ) W_x_back . append ( back_weights [ 3 ] . T ) b_back . append ( back_weights [ 2 ] ) b_back . append ( back_weights [ 8 ] ) b_back . append ( back_weights [ 11 ] ) b_back . append ( back_weights [ 5 ] ) else : keras_W_h = keras_layer . backward_layer . get_weights ( ) [ 1 ] . T W_h_back . append ( keras_W_h [ 0 * hidden_size : ] [ : hidden_size ] ) W_h_back . append ( keras_W_h [ 1 * hidden_size : ] [ : hidden_size ] ) W_h_back . append ( keras_W_h [ 3 * hidden_size : ] [ : hidden_size ] ) W_h_back . append ( keras_W_h [ 2 * hidden_size : ] [ : hidden_size ] ) keras_W_x = keras_layer . 
backward_layer . get_weights ( ) [ 0 ] . T W_x_back . append ( keras_W_x [ 0 * hidden_size : ] [ : hidden_size ] ) W_x_back . append ( keras_W_x [ 1 * hidden_size : ] [ : hidden_size ] ) W_x_back . append ( keras_W_x [ 3 * hidden_size : ] [ : hidden_size ] ) W_x_back . append ( keras_W_x [ 2 * hidden_size : ] [ : hidden_size ] ) keras_b = keras_layer . backward_layer . get_weights ( ) [ 2 ] b_back . append ( keras_b [ 0 * hidden_size : ] [ : hidden_size ] ) b_back . append ( keras_b [ 1 * hidden_size : ] [ : hidden_size ] ) b_back . append ( keras_b [ 3 * hidden_size : ] [ : hidden_size ] ) b_back . append ( keras_b [ 2 * hidden_size : ] [ : hidden_size ] ) # Set activation type inner_activation_str = _get_recurrent_activation_name_from_keras ( lstm_layer . inner_activation ) activation_str = _get_recurrent_activation_name_from_keras ( lstm_layer . activation ) # Add to the network builder . add_bidirlstm ( name = layer , W_h = W_h , W_x = W_x , b = b , W_h_back = W_h_back , W_x_back = W_x_back , b_back = b_back , hidden_size = hidden_size , input_size = input_size , input_names = input_names , output_names = output_names , inner_activation = inner_activation_str , cell_state_update_activation = activation_str , output_activation = activation_str , output_all = output_all )
def put(self, key, value, ttl=-1):
    """Associates the specified value with the specified key in this map.

    If the map previously contained a mapping for the key, the old value is
    replaced by the specified value. If ttl is provided, the entry will expire
    and get evicted after the ttl.

    **Warning: This method returns a clone of the previous value, not the
    original (identically equal) value previously put into the map.**

    **Warning: This method uses __hash__ and __eq__ methods of the binary form
    of the key, not the actual implementations of __hash__ and __eq__ defined
    in the key's class.**

    :param key: (object), the specified key.
    :param value: (object), the value to associate with the key.
    :param ttl: (int), maximum time in seconds for this entry to stay, if not
        provided, the value configured on server side configuration will be
        used (optional).
    :return: (object), previous value associated with key or ``None`` if there
        was no mapping for key.
    """
    check_not_none(key, "key can't be None")
    check_not_none(value, "value can't be None")
    key_data = self._to_data(key)
    value_data = self._to_data(value)
    return self._put_internal(key_data, value_data, ttl)
def _jacobian_both(nodes, degree, dimension):
    r"""Compute :math:`s` and :math:`t` partial of :math:`B`.

    .. note::

        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Args:
        nodes (numpy.ndarray): Array of nodes in a surface.
        degree (int): The degree of the surface.
        dimension (int): The dimension the surface lives in.

    Returns:
        numpy.ndarray: Nodes of the Jacobian surfaces in Bézier form.
    """
    _, num_nodes = nodes.shape
    result = np.empty((2 * dimension, num_nodes - degree - 1), order="F")
    result[:dimension, :] = jacobian_s(nodes, degree, dimension)
    result[dimension:, :] = jacobian_t(nodes, degree, dimension)
    return result
def make_exporter_resources(nb_name, out_folder, images_folder=None):
    """Creates resources dict for the exporter"""
    resources = defaultdict(str)
    resources['metadata'] = defaultdict(str)
    resources['metadata']['name'] = nb_name
    resources['metadata']['path'] = out_folder

    # This results in images like AB_5_1.png for a notebook called AB.ipynb
    resources['unique_key'] = nb_name
    resources['output_files_dir'] = images_folder

    return resources
def handle_comm_opened(comm, msg):
    """Static method, called when a widget is constructed."""
    version = msg.get('metadata', {}).get('version', '')
    if version.split('.')[0] != PROTOCOL_VERSION_MAJOR:
        raise ValueError(
            "Incompatible widget protocol versions: received version %r, expected version %r"
            % (version, __protocol_version__))
    data = msg['content']['data']
    state = data['state']

    # Find the widget class to instantiate in the registered widgets
    widget_class = Widget.widget_types.get(state['_model_module'],
                                           state['_model_module_version'],
                                           state['_model_name'],
                                           state['_view_module'],
                                           state['_view_module_version'],
                                           state['_view_name'])
    widget = widget_class(comm=comm)
    if 'buffer_paths' in data:
        _put_buffers(state, data['buffer_paths'], msg['buffers'])
    widget.set_state(state)
def sgd_entropic_regularization ( a , b , M , reg , batch_size , numItermax , lr ) : '''Compute the sgd algorithm to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem : . . math : : \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s . t . \ gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the ( ns , nt ) metric cost matrix - : math : ` \ Omega ` is the entropic regularization term with : math : ` \ Omega ( \ gamma ) = \ sum _ { i , j } \ gamma _ { i , j } \ log ( \ gamma _ { i , j } ) ` - a and b are source and target weights ( sum to 1) Parameters a : np . ndarray ( ns , ) source measure b : np . ndarray ( nt , ) target measure M : np . ndarray ( ns , nt ) cost matrix reg : float number Regularization term > 0 batch _ size : int number size of the batch numItermax : int number number of iteration lr : float number learning rate Returns alpha : np . ndarray ( ns , ) dual variable beta : np . ndarray ( nt , ) dual variable Examples > > > n _ source = 7 > > > n _ target = 4 > > > reg = 1 > > > numItermax = 20000 > > > lr = 0.1 > > > batch _ size = 3 > > > log = True > > > a = ot . utils . unif ( n _ source ) > > > b = ot . utils . unif ( n _ target ) > > > rng = np . random . RandomState ( 0) > > > X _ source = rng . randn ( n _ source , 2) > > > Y _ target = rng . randn ( n _ target , 2) > > > M = ot . dist ( X _ source , Y _ target ) > > > sgd _ dual _ pi , log = stochastic . solve _ dual _ entropic ( a , b , M , reg , batch _ size , numItermax , lr , log ) > > > print ( log [ ' alpha ' ] , log [ ' beta ' ] ) > > > print ( sgd _ dual _ pi ) References [ Seguy et al . , 2018 ] : International Conference on Learning Representation ( 2018 ) , arXiv preprint arxiv : 1711.02283.'''
n_source = np . shape ( M ) [ 0 ] n_target = np . shape ( M ) [ 1 ] cur_alpha = np . zeros ( n_source ) cur_beta = np . zeros ( n_target ) for cur_iter in range ( numItermax ) : k = np . sqrt ( cur_iter + 1 ) batch_alpha = np . random . choice ( n_source , batch_size , replace = False ) batch_beta = np . random . choice ( n_target , batch_size , replace = False ) update_alpha , update_beta = batch_grad_dual ( a , b , M , reg , cur_alpha , cur_beta , batch_size , batch_alpha , batch_beta ) cur_alpha [ batch_alpha ] += ( lr / k ) * update_alpha [ batch_alpha ] cur_beta [ batch_beta ] += ( lr / k ) * update_beta [ batch_beta ] return cur_alpha , cur_beta
def generate_threshold_mask(hist):
    '''Masking array elements when equal 0.0 or greater than 10 times the median

    Parameters
    ----------
    hist : array_like
        Input data.

    Returns
    -------
    masked array
        Returns copy of the array with masked elements.
    '''
    masked_array = np.ma.masked_values(hist, 0)
    masked_array = np.ma.masked_greater(masked_array, 10 * np.ma.median(hist))
    logging.info('Masking %d pixel(s)', np.ma.count_masked(masked_array))
    return np.ma.getmaskarray(masked_array)
from typing import List


def collect_prefixes(word: str) -> List[str]:
    """Generates a list of all prefixes of the input string in ascending order of their length

    Args:
        word (str): The string from which prefixes are to be generated

    Returns:
        List[str]: The list of all possible prefixes of the input string

    Example:
        >>> collect_prefixes('abc')
        ['a', 'ab', 'abc']
    """
    return [word[:i + 1] for i in range(len(word))]
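A quick sanity check of the prefix helper, assuming collect_prefixes above is in scope:

# Outputs shown as comments.
print(collect_prefixes("data"))   # ['d', 'da', 'dat', 'data']
print(collect_prefixes(""))       # [] -- an empty string has no prefixes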
def get_complexes(self):
    """Extract INDRA Complex Statements."""
    qstr = "$.events.frames[@.type is 'complex-assembly']"
    res = self.tree.execute(qstr)
    if res is None:
        return
    for r in res:
        epistemics = self._get_epistemics(r)
        if epistemics.get('negated'):
            continue
        # Due to an issue with the REACH output serialization
        # (though seemingly not with the raw mentions), sometimes
        # a redundant complex-assembly event is reported which can
        # be recognized by the missing direct flag, which we can filter
        # for here
        if epistemics.get('direct') is None:
            continue
        annotations, context = self._get_annot_context(r)
        args = r['arguments']
        sentence = r['verbose-text']
        members = []
        agent_coordinates = []
        for a in args:
            agent, coords = self._get_agent_from_entity(a['arg'])
            members.append(agent)
            agent_coordinates.append(coords)
        annotations['agents']['coords'] = agent_coordinates
        ev = Evidence(source_api='reach', text=sentence,
                      annotations=annotations, pmid=self.citation,
                      context=context, epistemics=epistemics)
        stmt = Complex(members, ev)
        self.statements.append(stmt)
def _get_right(cls):
    # type: (_MetaRule) -> List[object]
    """Get right part of the rule.

    :param cls: Rule for which return the right side.
    :return: Symbols on the right side of the array.
    :raise RuleNotDefinedException: If the rule is not defined.
    :raise CantCreateSingleRuleException: If the rule consists of more rules.
    :raise NotASingleSymbolException: If number of symbols on the left is more.
    """
    if cls._traverse:
        return [cls.toSymbol]
    if len(cls.rules) > 1:
        raise CantCreateSingleRuleException(cls)
    return cls.rules[0][1]
def get_availability_zone():
    """Gets the AWS Availability Zone ID for this system

    :return: (str) Availability Zone ID where this system lives
    """
    log = logging.getLogger(mod_logger + '.get_availability_zone')

    # Exit if not running on AWS
    if not is_aws():
        log.info('This machine is not running in AWS, exiting...')
        return

    availability_zone_url = metadata_url + 'placement/availability-zone'
    try:
        response = urllib.urlopen(availability_zone_url)
    except (IOError, OSError) as ex:
        msg = 'Unable to query URL to get Availability Zone: {u}\n{e}'.format(
            u=availability_zone_url, e=ex)
        log.error(msg)
        return

    # Check the code
    if response.getcode() != 200:
        msg = 'There was a problem querying url: {u}, returned code: {c}, unable to get the Availability Zone'.format(
            u=availability_zone_url, c=response.getcode())
        log.error(msg)
        return

    availability_zone = response.read()
    return availability_zone
def run_mace_smothr(x, y, bass_enhancement=0.0):  # pylint: disable=unused-argument
    """Run the FORTRAN SMOTHR."""
    N = len(x)
    weight = numpy.ones(N)
    results = numpy.zeros(N)
    flags = numpy.zeros((N, 7))
    mace.smothr(1, x, y, weight, results, flags)
    return results
def parse_key(string):
    """Split a key into path elements:

    - a.b.c => a, b, c
    - a."b.c" => a, QuotedKey("b.c") if . is any of the special characters: $}[]:=+#`^?!@*&.
    - "a" => a
    - a.b."c" => a, b, c (special case)

    :param str:
    :return:
    """
    special_characters = '$}[]:=+#`^?!@*&.'
    tokens = re.findall(
        r'"[^"]+"|[^{special_characters}]+'.format(
            special_characters=re.escape(special_characters)),
        string)

    def contains_special_character(token):
        return any((c in special_characters) for c in token)

    return [token if contains_special_character(token) else token.strip('"')
            for token in tokens]
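As a rough illustration of the tokenizer, assuming parse_key and its module-level re import are in scope (outputs reasoned from the code above, not taken from the original library):

print(parse_key('a.b.c'))     # ['a', 'b', 'c']
print(parse_key('a."b.c"'))   # ['a', '"b.c"'] -- the quoted segment stays as one token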
def x(self, x):
    """Project x"""
    if x is None or x <= 0 or self.log10_xmax - self.log10_xmin == 0:
        return None
    return (self.width * (log10(x) - self.log10_xmin)
            / (self.log10_xmax - self.log10_xmin))
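A worked example with made-up scale parameters, to show what the log projection does:

from math import log10

# Hypothetical scale: width = 300 pixels, log10 range [0, 3] (i.e. x in [1, 1000]).
width, lo, hi = 300, 0.0, 3.0
x = 100
print(width * (log10(x) - lo) / (hi - lo))   # 200.0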
def set_values(self, values):
    """expects a list of 2-tuples"""
    self.values = values
    self.height = len(self.values) * 14
    self._max = max(rec[1] for rec in values) if values else dt.timedelta(0)
def _set_arrayorder(obj, arrayorder='C'):
    """Set the memory order of all np.ndarrays in a tofu object"""
    msg = "Arg arrayorder must be in ['C','F']"
    assert arrayorder in ['C', 'F'], msg

    d = obj.to_dict(strip=-1)
    account = {'Success': [], 'Failed': []}
    for k, v in d.items():
        # Check against np.ndarray (np.array is a factory function, not a type)
        if isinstance(v, np.ndarray) and v.ndim > 1:
            try:
                if arrayorder == 'C':
                    d[k] = np.ascontiguousarray(v)
                else:
                    d[k] = np.asfortranarray(v)
                account['Success'].append(k)
            except Exception as err:
                warnings.warn(str(err))
                account['Failed'].append(k)
    return d, account
def _classify_arithmetic_load ( self , regs_init , regs_fini , mem_fini , written_regs , read_regs ) : """Classify arithmetic - load gadgets ."""
matches = [ ] # Check for " dst _ reg < - dst _ reg OP mem [ src _ reg + offset ] " pattern . for op_name , op_fn in self . _binary_ops . items ( ) : for dst_reg , dst_val in regs_fini . items ( ) : # Make sure the * dst * register was read and written . if dst_reg not in written_regs or dst_reg not in read_regs : continue dst_size = self . _arch_regs_size [ dst_reg ] for addr in mem_fini . get_addresses ( ) : success , val = mem_fini . try_read ( addr , dst_size // 8 ) if success and dst_val == op_fn ( regs_init [ dst_reg ] , val ) & ( 2 ** dst_size - 1 ) : for src_reg , src_val in regs_init . items ( ) : # Make sure the * src * register was read . if src_reg not in read_regs : continue # Check restrictions . if self . _arch_regs_size [ src_reg ] != self . _address_size : continue offset = ( addr - src_val ) & ( 2 ** self . _address_size - 1 ) src_reg_ir = ReilRegisterOperand ( src_reg , self . _arch_regs_size [ src_reg ] ) src_off_ir = ReilImmediateOperand ( offset , self . _address_size ) dst_reg_ir = ReilRegisterOperand ( dst_reg , self . _arch_regs_size [ dst_reg ] ) matches . append ( { "src" : [ dst_reg_ir , src_reg_ir , src_off_ir ] , "dst" : [ dst_reg_ir ] , "op" : op_name } ) # Check for " dst _ reg < - dst _ reg OP mem [ offset ] " pattern . for op_name , op_fn in self . _binary_ops . items ( ) : for dst_reg , dst_val in regs_fini . items ( ) : # Make sure the * dst * register was read and written . if dst_reg not in written_regs or dst_reg not in read_regs : continue dst_size = self . _arch_regs_size [ dst_reg ] for addr in mem_fini . get_addresses ( ) : success , val = mem_fini . try_read ( addr , dst_size // 8 ) if success and dst_val == op_fn ( regs_init [ dst_reg ] , val ) & ( 2 ** dst_size - 1 ) : src_reg_ir = ReilEmptyOperand ( ) src_off_ir = ReilImmediateOperand ( addr , self . _address_size ) dst_reg_ir = ReilRegisterOperand ( dst_reg , self . _arch_regs_size [ dst_reg ] ) matches . append ( { "src" : [ dst_reg_ir , src_reg_ir , src_off_ir ] , "dst" : [ dst_reg_ir ] , "op" : op_name } ) return matches
def _evalString ( self , datetimeString , sourceTime = None ) : """Calculate the datetime based on flags set by the L { parse ( ) } routine Examples handled : : RFC822 , W3CDTF formatted dates HH : MM [ : SS ] [ am / pm ] MM / DD / YYYY DD MMMM YYYY @ type datetimeString : string @ param datetimeString : text to try and parse as more " traditional " date / time text @ type sourceTime : struct _ time @ param sourceTime : C { struct _ time } value to use as the base @ rtype : datetime @ return : calculated C { struct _ time } value or current C { struct _ time } if not parsed"""
s = datetimeString . strip ( ) now = time . localtime ( ) # Given string date is a RFC822 date if sourceTime is None : sourceTime = _parse_date_rfc822 ( s ) if sourceTime is not None : ( yr , mth , dy , hr , mn , sec , wd , yd , isdst , _ ) = sourceTime self . dateFlag = 1 if ( hr != 0 ) and ( mn != 0 ) and ( sec != 0 ) : self . timeFlag = 2 sourceTime = ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) # Given string date is a W3CDTF date if sourceTime is None : sourceTime = _parse_date_w3dtf ( s ) if sourceTime is not None : self . dateFlag = 1 self . timeFlag = 2 if sourceTime is None : s = s . lower ( ) # Given string is in the format HH : MM ( : SS ) ( am / pm ) if self . meridianFlag : if sourceTime is None : ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) = now else : ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) = sourceTime m = self . ptc . CRE_TIMEHMS2 . search ( s ) if m is not None : dt = s [ : m . start ( 'meridian' ) ] . strip ( ) if len ( dt ) <= 2 : hr = int ( dt ) mn = 0 sec = 0 else : hr , mn , sec = _extract_time ( m ) if hr == 24 : hr = 0 sourceTime = ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) meridian = m . group ( 'meridian' ) . lower ( ) # if ' am ' found and hour is 12 - force hour to 0 ( midnight ) if ( meridian in self . ptc . am ) and hr == 12 : sourceTime = ( yr , mth , dy , 0 , mn , sec , wd , yd , isdst ) # if ' pm ' found and hour < 12 , add 12 to shift to evening if ( meridian in self . ptc . pm ) and hr < 12 : sourceTime = ( yr , mth , dy , hr + 12 , mn , sec , wd , yd , isdst ) # invalid time if hr > 24 or mn > 59 or sec > 59 : sourceTime = now self . dateFlag = 0 self . timeFlag = 0 self . meridianFlag = False # Given string is in the format HH : MM ( : SS ) if self . timeStdFlag : if sourceTime is None : ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) = now else : ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) = sourceTime m = self . ptc . CRE_TIMEHMS . search ( s ) if m is not None : hr , mn , sec = _extract_time ( m ) if hr == 24 : hr = 0 if hr > 24 or mn > 59 or sec > 59 : # invalid time sourceTime = now self . dateFlag = 0 self . timeFlag = 0 else : sourceTime = ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) self . timeStdFlag = False # Given string is in the format 07/21/2006 if self . dateStdFlag : sourceTime = self . parseDate ( s ) self . dateStdFlag = False # Given string is in the format " May 23rd , 2005" if self . dateStrFlag : sourceTime = self . parseDateText ( s ) self . dateStrFlag = False # Given string is a weekday if self . weekdyFlag : ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) = now start = datetime . datetime ( yr , mth , dy , hr , mn , sec ) wkdy = self . ptc . WeekdayOffsets [ s ] if wkdy > wd : qty = self . _CalculateDOWDelta ( wd , wkdy , 2 , self . ptc . DOWParseStyle , self . ptc . CurrentDOWParseStyle ) else : qty = self . _CalculateDOWDelta ( wd , wkdy , 2 , self . ptc . DOWParseStyle , self . ptc . CurrentDOWParseStyle ) target = start + datetime . timedelta ( days = qty ) wd = wkdy sourceTime = target . timetuple ( ) self . weekdyFlag = False # Given string is a natural language time string like # lunch , midnight , etc if self . timeStrFlag : if s in self . ptc . re_values [ 'now' ] : sourceTime = now else : sources = self . ptc . buildSources ( sourceTime ) if s in sources : sourceTime = sources [ s ] else : sourceTime = now self . dateFlag = 0 self . timeFlag = 0 self . timeStrFlag = False # Given string is a natural language date string like today , tomorrow . . if self . 
dayStrFlag : if sourceTime is None : sourceTime = now ( yr , mth , dy , hr , mn , sec , wd , yd , isdst ) = sourceTime if s in self . ptc . dayOffsets : offset = self . ptc . dayOffsets [ s ] else : offset = 0 start = datetime . datetime ( yr , mth , dy , 9 , 0 , 0 ) target = start + datetime . timedelta ( days = offset ) sourceTime = target . timetuple ( ) self . dayStrFlag = False # Given string is a time string with units like " 5 hrs 30 min " if self . unitsFlag : modifier = '' # TODO if sourceTime is None : sourceTime = now m = self . ptc . CRE_UNITS . search ( s ) if m is not None : units = m . group ( 'units' ) quantity = s [ : m . start ( 'units' ) ] sourceTime = self . _buildTime ( sourceTime , quantity , modifier , units ) self . unitsFlag = False # Given string is a time string with single char units like " 5 h 30 m " if self . qunitsFlag : modifier = '' # TODO if sourceTime is None : sourceTime = now m = self . ptc . CRE_QUNITS . search ( s ) if m is not None : units = m . group ( 'qunits' ) quantity = s [ : m . start ( 'qunits' ) ] sourceTime = self . _buildTime ( sourceTime , quantity , modifier , units ) self . qunitsFlag = False # Given string does not match anything if sourceTime is None : sourceTime = now self . dateFlag = 0 self . timeFlag = 0 return sourceTime
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
    """Copy parameters from arg_params, aux_params into executor's internal array.

    Parameters
    ----------
    arg_params : dict of str to NDArray
        Parameters, dict of name to NDArray of arguments.
    aux_params : dict of str to NDArray, optional
        Parameters, dict of name to NDArray of auxiliary states.
    allow_extra_params : boolean, optional
        Whether allow extra parameters that are not needed by symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that is not needed by the executor.

    Raises
    ------
    ValueError
        If there is additional parameters in the dict but ``allow_extra_params=False``.

    Examples
    --------
    >>> # set parameters with existing model checkpoint
    >>> model_prefix = 'mx_mlp'
    >>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
    >>> texec.copy_params_from(arg_params, aux_params)
    """
    for name, array in arg_params.items():
        if name in self.arg_dict:
            dst = self.arg_dict[name]
            array.astype(dst.dtype).copyto(dst)
        elif not allow_extra_params:
            raise ValueError('Find name \"%s\" that is not in the arguments' % name)

    if aux_params is None:
        return
    for name, array in aux_params.items():
        if name in self.aux_dict:
            dst = self.aux_dict[name]
            array.astype(dst.dtype).copyto(dst)
        elif not allow_extra_params:
            raise ValueError('Find name %s that is not in the auxiliary states' % name)
def zeroize():
    '''
    Resets the device to default factory settings

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.zeroize
    '''
    conn = __proxy__['junos.conn']()
    ret = {}
    ret['out'] = True
    try:
        conn.cli('request system zeroize')
        ret['message'] = 'Completed zeroize and rebooted'
    except Exception as exception:
        ret['message'] = 'Could not zeroize due to : "{0}"'.format(exception)
        ret['out'] = False
    return ret
def index(self):
    '''Index function.'''
    self.render('index/index.html',
                userinfo=self.userinfo,
                catalog_info=MCategory.query_all(by_order=True),
                link=MLink.query_all(),
                cfg=CMS_CFG,
                view=MPost.query_most_pic(20),
                kwd={})
def close(self):
    """Close connection to server."""
    try:
        self._socket.sendall('quit\r\n')
    except socket.error:
        pass
    try:
        self._socket.close()
    except socket.error:
        pass
def get(self, session, fields=None):
    '''taobao.fenxiao.productcats.get: query the product line list.

    Queries all product line data for the supplier. The lookup is based on the
    logged-in user, so no other input parameters are required.
    '''
    request = TOPRequest('taobao.fenxiao.productcats.get')
    if fields:
        request['fields'] = fields
    self.create(self.execute(request, session))
    return self.productcats
def overlaps(self, other: "Interval") -> bool:
    """Does this interval overlap the other?

    Overlap:

    .. code-block:: none

        S-----S     S---S        S---S
          O---O       O---O    O---O

    Simpler method of testing is for non-overlap!

    .. code-block:: none

        S---S           S---S
              O---O  O---O
    """
    return not (self.end <= other.start or self.start >= other.end)
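A minimal standalone sketch of the same non-overlap test on plain numbers (the real method lives on an Interval object with start/end attributes, which is assumed here):

def interval_overlaps(a_start, a_end, b_start, b_end):
    # Touching endpoints do not count as overlap, mirroring the method above.
    return not (a_end <= b_start or a_start >= b_end)

print(interval_overlaps(0, 5, 3, 8))   # True  -- the ranges share 3..5
print(interval_overlaps(0, 5, 5, 8))   # False -- they only touch at 5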
def find_bidi(self, el):
    """Get directionality from element text."""
    for node in self.get_children(el, tags=False):

        # Analyze child text nodes
        if self.is_tag(node):

            # Avoid analyzing certain elements specified in the specification.
            direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
            if (
                self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or
                not self.is_html_tag(node) or
                direction is not None
            ):
                continue  # pragma: no cover

            # Check directionality of this node's text
            value = self.find_bidi(node)
            if value is not None:
                return value

            # Direction could not be determined
            continue  # pragma: no cover

        # Skip `doctype` comments, etc.
        if self.is_special_string(node):
            continue

        # Analyze text nodes for directionality.
        for c in node:
            bidi = unicodedata.bidirectional(c)
            if bidi in ('AL', 'R', 'L'):
                return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
    return None
def on_close(self, filename):
    """Move this file to destination folder."""
    shutil.move(filename, self.destination_folder)
    path, fn = os.path.split(filename)
    return os.path.join(self.destination_folder, fn)
def get_molecule_hash(self, mol):
    """Return inchi as molecular hash"""
    obmol = BabelMolAdaptor(mol).openbabel_mol
    inchi = self._inchi_labels(obmol)[2]
    return inchi
def formatchecker_factory(**checkerdict):
    """Converts a dictionary of strings: checkers into a formatchecker object"""
    fc = FormatChecker()
    for format_name, checker in checkerdict.items():
        fc.checks(format_name)(checker)
    return fc
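A usage sketch, assuming FormatChecker here is jsonschema's FormatChecker and that formatchecker_factory above is importable; the 'even' format is made up for the example:

def is_even(value):
    return isinstance(value, int) and value % 2 == 0

fc = formatchecker_factory(even=is_even)
print(fc.conforms(4, 'even'))   # True
print(fc.conforms(3, 'even'))   # False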
def local_score(self, variable, parents):
    """Computes a score that measures how much a given variable is
    "influenced" by a given list of potential parents.
    """
    var_states = self.state_names[variable]
    var_cardinality = len(var_states)
    state_counts = self.state_counts(variable, parents)
    sample_size = len(self.data)
    num_parents_states = float(len(state_counts.columns))

    score = 0
    for parents_state in state_counts:  # iterate over df columns (only 1 if no parents)
        conditional_sample_size = sum(state_counts[parents_state])

        for state in var_states:
            if state_counts[parents_state][state] > 0:
                score += state_counts[parents_state][state] * (
                    log(state_counts[parents_state][state]) -
                    log(conditional_sample_size))

    score -= 0.5 * log(sample_size) * num_parents_states * (var_cardinality - 1)

    return score
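Reading the loop, the quantity computed matches a BIC-style local score; under that interpretation (an inference, not stated in the source), it is

\mathrm{score}(X \mid \mathrm{Pa}) = \sum_{j=1}^{q} \sum_{k=1}^{r} N_{jk}\left(\log N_{jk} - \log N_{j}\right) - \tfrac{1}{2}\, q\,(r-1)\,\log N

where N_{jk} is the count of state k under parent configuration j, N_j the total count for that configuration, q the number of parent configurations, r the cardinality of the variable and N the sample size.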
def match_ancestor_bank_id(self, bank_id, match):
    """Sets the bank ``Id`` to match banks in which the specified bank is an ancestor.

    arg:    bank_id (osid.id.Id): a bank ``Id``
    arg:    match (boolean): ``true`` for a positive match, ``false`` for a negative match
    raise:  NullArgument - ``bank_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # matches when the bank_id param is an ancestor of any bank
    bank_descendants = self._get_descendant_catalog_ids(bank_id)
    identifiers = [ObjectId(i.identifier) for i in bank_descendants]
    self._query_terms['_id'] = {'$in': identifiers}
def add_user(self, users=None, groups=None):
    """Add the specified users or groups to this project role. One of ``users``
    or ``groups`` must be specified.

    :param users: a user or users to add to the role
    :type users: string, list or tuple
    :param groups: a group or groups to add to the role
    :type groups: string, list or tuple
    """
    if users is not None and isinstance(users, string_types):
        users = (users,)
    if groups is not None and isinstance(groups, string_types):
        groups = (groups,)

    data = {'user': users}
    self._session.post(self.self, data=json.dumps(data))
def LastOf(*subcons):
    """Create an adapter which uses only the last construct.

    If first argument is a string it will be the name.
    """
    name = "seq"
    if isinstance(subcons[0], six.string_types):
        name = subcons[0]
        subcons = subcons[1:]
    return IndexingAdapter(Sequence(name, *subcons), -1)
def get_media_url(request, media_id):
    """Get media URL."""
    media = Media.objects.get(id=media_id)
    return HttpResponse(media.url.name)
def _process_templatedata(self, node, **_):
    """Processes a `TemplateData` node, this is just a bit of as-is text
    to be written to the output.
    """
    # escape double quotes
    value = re.sub('"', r'\\"', node.data)

    # escape new lines
    value = re.sub('\n', r'\\n', value)

    # append value to the result
    self.output.write('__result += "' + value + '";')
def config(filename):
    """Construct `Config` object and return a list.

    :param filename: A string containing the path to YAML file.
    :return: list
    """
    Config = collections.namedtuple('Config', [
        'git',
        'lock_file',
        'version',
        'name',
        'src',
        'dst',
        'files',
        'post_commands',
    ])

    return [Config(**d) for d in _get_config_generator(filename)]
def common_elements(list1, list2):
    """The function identifies the common elements in two provided lists using a lambda function.

    Examples:
        common_elements([1, 2, 3, 5, 7, 8, 9, 10], [1, 2, 4, 8, 9]) -> [1, 2, 8, 9]
        common_elements([1, 2, 3, 5, 7, 8, 9, 10], [3, 5, 7, 9]) -> [3, 5, 7, 9]
        common_elements([1, 2, 3, 5, 7, 8, 9, 10], [10, 20, 30, 40]) -> [10]

    :param list1: The first list of numbers.
    :param list2: The second list of numbers.
    :return: Returns a list containing the common elements present in both input lists.
    """
    common_list = list(filter((lambda x: x in list1), list2))
    return common_list
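Membership tests against a plain list are linear, so for long inputs a set-based variant (a sketch, not part of the original) avoids quadratic behaviour while keeping list2's order:

def common_elements_fast(list1, list2):
    lookup = set(list1)   # O(1) membership tests
    return [x for x in list2 if x in lookup]

print(common_elements_fast([1, 2, 3, 5, 7, 8, 9, 10], [1, 2, 4, 8, 9]))   # [1, 2, 8, 9]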
def _on_mouse_moved(self, event):
    """mouse moved callback"""
    if event.modifiers() & QtCore.Qt.ControlModifier:
        cursor = TextHelper(self.editor).word_under_mouse_cursor()
        if not self._cursor or cursor.position() != self._cursor.position():
            self._check_word_cursor(cursor)
        self._cursor = cursor
    else:
        self._cursor = None
        self._clear_selection()
def get_child_objectives(self, objective_id=None):
    """Gets the children of the given objective.

    arg:    objective_id (osid.id.Id): the Id to query
    return: (osid.learning.ObjectiveList) - the children of the objective
    raise:  NotFound - objective_id is not found
    raise:  NullArgument - objective_id is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_id is None:
        raise NullArgument()
    url_path = construct_url('children',
                             bank_id=self._catalog_idstr,
                             obj_id=objective_id)
    return objects.ObjectiveList(self._get_request(url_path))
def __normalize_list(self, msg):
    """Split message to list by commas and trim whitespace."""
    if isinstance(msg, list):
        msg = "".join(msg)
    return list(map(lambda x: x.strip(), msg.split(",")))
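An illustration of the normalisation (self is unused by the logic, so a plain function stands in for the method here):

def normalize_list(msg):
    if isinstance(msg, list):
        msg = "".join(msg)
    return [x.strip() for x in msg.split(",")]

print(normalize_list(" a, b ,c "))         # ['a', 'b', 'c']
print(normalize_list([" a, b", " ,c "]))   # ['a', 'b', 'c']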
def __pull_image_info(self, title, imageinfo, normalized):
    """Pull image INFO from API response and insert"""
    for info in imageinfo:
        info.update({'title': title})

        # get API normalized "from" filename for matching
        _from = None
        for norm in normalized:
            if title == norm['to']:
                _from = norm['from']

        # let's put all "metadata" in one member
        info['metadata'] = {}
        extmetadata = info.get('extmetadata')
        if extmetadata:
            info['metadata'].update(extmetadata)
            del info['extmetadata']

        self.__insert_image_info(title, _from, info)
def write_line_shp(line_list, out_shp):
    """Export ESRI Shapefile -- Line feature"""
    print('Write line shapefile: %s' % out_shp)
    driver = ogr_GetDriverByName(str('ESRI Shapefile'))
    if driver is None:
        print('ESRI Shapefile driver not available.')
        sys.exit(1)
    if os.path.exists(out_shp):
        driver.DeleteDataSource(out_shp)
    ds = driver.CreateDataSource(out_shp.rpartition(os.sep)[0])
    if ds is None:
        print('ERROR Output: Creation of output file failed.')
        sys.exit(1)
    lyr = ds.CreateLayer(str(out_shp.rpartition(os.sep)[2].split('.')[0]),
                         None, wkbLineString)
    for l in line_list:
        line = ogr_Geometry(wkbLineString)
        for i in l:
            line.AddPoint(i[0], i[1])
        templine = ogr_CreateGeometryFromJson(line.ExportToJson())
        feature = ogr_Feature(lyr.GetLayerDefn())
        feature.SetGeometry(templine)
        lyr.CreateFeature(feature)
        feature.Destroy()
    ds.Destroy()
def finalize(self, **kwargs):
    """Finalize the drawing by adding a title and legend, and removing the
    axes objects that do not convey information about t-SNE.
    """
    self.set_title(
        "TSNE Projection of {} Documents".format(self.n_instances_)
    )

    # Remove the ticks
    self.ax.set_yticks([])
    self.ax.set_xticks([])

    # Add the legend outside of the figure box.
    if not all(self.classes_ == np.array([self.NULL_CLASS])):
        box = self.ax.get_position()
        self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        manual_legend(
            self, self.classes_, self.color_values_,
            loc='center left', bbox_to_anchor=(1, 0.5)
        )
def onCallStarted(
    self,
    mid=None,
    caller_id=None,
    is_video_call=None,
    thread_id=None,
    thread_type=None,
    ts=None,
    metadata=None,
    msg=None,
):
    """
    .. todo::
        Make this work with private calls

    Called when the client is listening, and somebody starts a call in a group

    :param mid: The action ID
    :param caller_id: The ID of the person who started the call
    :param is_video_call: True if it's video call
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param metadata: Extra metadata about the action
    :param msg: A full set of the data received
    :type thread_type: models.ThreadType
    """
    log.info(
        "{} started call in {} ({})".format(caller_id, thread_id, thread_type.name)
    )
def create_from_json(cls, json_str, ignore_non_defaults=True):
    """Creates a database object from a json object.

    The intent of this method is to allow creating a database object directly
    from json. Mongolia will also automatically convert any json values that
    are formatted using the MongoliaJSONEncoder (for ObjectIds and datetime
    objects) back to their native python data types.

    Note: if using AngularJS, make sure to pass json back using
    `angular.toJson(obj)` instead of `JSON.stringify(obj)` since angular
    sometimes adds `$$hashkey` to javascript objects and this will cause a
    mongo error due to the "$" prefix in keys.

    @param json_str: the json string containing the new object to use for
        creating the new object
    @param ignore_non_defaults: if this is True and the database object has
        non-empty DEFAULTS, then any top-level keys of the create json that do
        not appear in DEFAULTS will also be excluded in creation
    """
    create_dict = json.loads(json_str, cls=MongoliaJSONDecoder, encoding="utf-8")

    # Remove all keys not in DEFAULTS if ignore_non_defaults is True
    if cls.DEFAULTS and ignore_non_defaults:
        for key in frozenset(create_dict).difference(frozenset(cls.DEFAULTS)):
            del create_dict[key]

    cls.create(create_dict, random_id=True)
def is_value_in(constants_group, value):
    """Checks whether value can be found in the given constants group, which
    in turn, should be a Django-like choices tuple.
    """
    for const_value, label in constants_group:
        if const_value == value:
            return True
    return False
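A quick illustration with a Django-style choices tuple (the tuple is made up for the example):

COLOR_CHOICES = (
    ('r', 'Red'),
    ('g', 'Green'),
    ('b', 'Blue'),
)

print(is_value_in(COLOR_CHOICES, 'g'))   # True
print(is_value_in(COLOR_CHOICES, 'x'))   # False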
def destroy_comment(self, access_token, comment_id):
    """doc: http://open.youku.com/docs/doc?id=42"""
    url = 'https://openapi.youku.com/v2/comments/destroy.json'
    data = {
        'client_id': self.client_id,
        'access_token': access_token,
        'comment_id': comment_id
    }
    r = requests.post(url, data=data)
    check_error(r)
    return r.json()['id']
def update_value(self, id_number, new_value, metadata=None):
    """Update a canned value

    :type id_number: int
    :param id_number: canned value ID number

    :type new_value: str
    :param new_value: New canned value value

    :type metadata: str
    :param metadata: Optional metadata

    :rtype: dict
    :return: an empty dictionary
    """
    data = {
        'id': id_number,
        'new_value': new_value
    }
    if metadata is not None:
        data['metadata'] = metadata

    return self.post('updateValue', data)
def partition_master(incoming=True, outgoing=True):
    """Partition master's port alone. To keep DC/OS cluster running.

    :param incoming: Partition incoming traffic to master process. Default True.
    :param outgoing: Partition outgoing traffic from master process. Default True.
    """
    echo('Partitioning master. Incoming:{} | Outgoing:{}'.format(incoming, outgoing))

    network.save_iptables(shakedown.master_ip())
    network.flush_all_rules(shakedown.master_ip())
    network.allow_all_traffic(shakedown.master_ip())

    if incoming and outgoing:
        network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_INCOMING)
        network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_OUTGOING)
    elif incoming:
        network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_INCOMING)
    elif outgoing:
        network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_OUTGOING)
    else:
        pass
def _install_maya(use_threaded_wrapper):
    """Helper function to Autodesk Maya support"""
    from maya import utils, cmds

    def threaded_wrapper(func, *args, **kwargs):
        return utils.executeInMainThreadWithResult(func, *args, **kwargs)

    sys.stdout.write("Setting up Pyblish QML in Maya\n")

    if cmds.about(version=True) == "2018":
        _remove_googleapiclient()

    _common_setup("Maya", threaded_wrapper, use_threaded_wrapper)
def _find_own_cgroups():
    """
    For all subsystems, return the information in which (sub-)cgroup this
    process is in. (Each process is in exactly one cgroup in each hierarchy.)

    @return a generator of tuples (subsystem, cgroup)
    """
    try:
        with open('/proc/self/cgroup', 'rt') as ownCgroupsFile:
            for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile):
                yield cgroup
    except IOError:
        logging.exception('Cannot read /proc/self/cgroup')
def find_in_mailbox(cls, session, mailbox_or_id):
    """Get the users that are associated to a Mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox_or_id (MailboxRef or int): Mailbox or the ID of the mailbox
            to get the folders for.

    Returns:
        RequestPaginator(output_type=helpscout.models.User): Users iterator.
    """
    if hasattr(mailbox_or_id, 'id'):
        mailbox_or_id = mailbox_or_id.id
    return cls(
        '/mailboxes/%d/users.json' % mailbox_or_id,
        session=session,
    )
def matrix_rank(model):
    """Return the rank of the model's stoichiometric matrix.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.
    """
    s_matrix, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions)
    return con_helpers.rank(s_matrix)
def calc_bin(self, _bin=None):
    """Calculate the smallest UCSC genomic bin that will contain this feature."""
    if _bin is None:
        try:
            _bin = bins.bins(self.start, self.end, one=True)
        except TypeError:
            _bin = None
    return _bin
def retrieve_mime(self):
    """Check the mime-type to download"""
    mime = self.retrieve_config('mime', 'audio')
    mimedict = {"number": mime}
    # the input that parse_for_download expects
    return aux.parse_for_download(mimedict)
def add_group(self, group_attribs=None, parent=None):
    """Add an empty group element to the SVG."""
    if parent is None:
        parent = self.tree.getroot()
    elif not self.contains_group(parent):
        warnings.warn('The requested group {0} does not belong to '
                      'this Document'.format(parent))

    if group_attribs is None:
        group_attribs = {}
    else:
        group_attribs = group_attribs.copy()

    return SubElement(parent, '{{{0}}}g'.format(SVG_NAMESPACE['svg']),
                      group_attribs)
def import_from_grammar_into_namespace ( grammar , namespace , aliases ) : """Returns all rules and terminals of grammar , prepended with a ' namespace ' prefix , except for those which are aliased ."""
imported_terms = dict ( grammar . term_defs ) imported_rules = { n : ( n , deepcopy ( t ) , o ) for n , t , o in grammar . rule_defs } term_defs = [ ] rule_defs = [ ] def rule_dependencies ( symbol ) : if symbol . type != 'RULE' : return [ ] try : _ , tree , _ = imported_rules [ symbol ] except KeyError : raise GrammarError ( "Missing symbol '%s' in grammar %s" % ( symbol , namespace ) ) return tree . scan_values ( lambda x : x . type in ( 'RULE' , 'TERMINAL' ) ) def get_namespace_name ( name ) : try : return aliases [ name ] . value except KeyError : if name [ 0 ] == '_' : return '_%s__%s' % ( namespace , name [ 1 : ] ) return '%s__%s' % ( namespace , name ) to_import = list ( bfs ( aliases , rule_dependencies ) ) for symbol in to_import : if symbol . type == 'TERMINAL' : term_defs . append ( [ get_namespace_name ( symbol ) , imported_terms [ symbol ] ] ) else : assert symbol . type == 'RULE' rule = imported_rules [ symbol ] for t in rule [ 1 ] . iter_subtrees ( ) : for i , c in enumerate ( t . children ) : if isinstance ( c , Token ) and c . type in ( 'RULE' , 'TERMINAL' ) : t . children [ i ] = Token ( c . type , get_namespace_name ( c ) ) rule_defs . append ( ( get_namespace_name ( symbol ) , rule [ 1 ] , rule [ 2 ] ) ) return term_defs , rule_defs
def getid(self, language=None, version=None):
    """Return an identification string which uniquely names a manifest.

    This string is a combination of the manifest's processorArchitecture,
    name, publicKeyToken, version and language.

    Arguments:
    version (tuple or list of integers) - If version is given, use it
                                          instead of the manifest's version.
    """
    if not self.name:
        logger.warn("Assembly metadata incomplete")
        return ""
    id = []
    if self.processorArchitecture:
        id.append(self.processorArchitecture)
    id.append(self.name)
    if self.publicKeyToken:
        id.append(self.publicKeyToken)
    if version or self.version:
        id.append(".".join([str(i) for i in version or self.version]))
    if not language:
        language = self.getlanguage()
    if language:
        id.append(language)
    return "_".join(id)
def _check_seismogenic_depths(self, upper_depth, lower_depth):
    '''
    Checks the seismic depths for physical consistency

    :param float upper_depth:
        Upper seismogenic depth (km)
    :param float lower_depth:
        Lower seismogenic depth (km)
    '''
    # Simple check on depths
    if upper_depth:
        if upper_depth < 0.:
            raise ValueError('Upper seismogenic depth must be greater than'
                             ' or equal to 0.0!')
        else:
            self.upper_depth = upper_depth
    else:
        self.upper_depth = 0.0

    if lower_depth:
        if lower_depth < self.upper_depth:
            raise ValueError('Lower seismogenic depth must take a greater'
                             ' value than upper seismogenic depth')
        else:
            self.lower_depth = lower_depth
    else:
        self.lower_depth = np.inf
def setReplicationPolicyResponse(self, pid, policy, serialVersion, vendorSpecific=None):
    """CNReplication.setReplicationPolicy(session, pid, policy, serialVersion) → boolean

    https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationPolicy

    Args:
        pid:
        policy:
        serialVersion:
        vendorSpecific:

    Returns:
    """
    mmp_dict = {
        'policy': ('policy.xml', policy.toxml('utf-8')),
        'serialVersion': str(serialVersion),
    }
    return self.PUT(
        ['replicaPolicies', pid], fields=mmp_dict, headers=vendorSpecific
    )
def getOntologyByName(self, name):
    """Returns an ontology by name"""
    if name not in self._ontologyNameMap:
        raise exceptions.OntologyNameNotFoundException(name)
    return self._ontologyNameMap[name]
def from_qs(cls, qs, **kwargs):
    """Creates a new queryset using class `cls` using `qs`'s data.

    :param qs: The query set to clone
    :keyword kwargs: The kwargs to pass to _clone method
    """
    assert issubclass(cls, QuerySet), "%s is not a QuerySet subclass" % cls
    assert isinstance(qs, QuerySet), "qs has to be an instance of queryset"
    return qs._clone(klass=cls, **kwargs)
def convert_gru ( builder , layer , input_names , output_names , keras_layer ) : """Convert a GRU layer from keras to coreml . Parameters keras _ layer : layer A keras layer object . builder : NeuralNetworkBuilder A neural network builder object ."""
hidden_size = keras_layer . output_dim input_size = keras_layer . input_shape [ - 1 ] output_all = keras_layer . return_sequences reverse_input = keras_layer . go_backwards if keras_layer . consume_less not in [ 'cpu' , 'gpu' ] : raise ValueError ( 'Cannot convert Keras layer with consume_less = %s' % keras_layer . consume_less ) # Keras : Z R O # CoreML : Z R O W_h , W_x , b = ( [ ] , [ ] , [ ] ) if keras_layer . consume_less == 'cpu' : W_x . append ( keras_layer . get_weights ( ) [ 0 ] . T ) W_x . append ( keras_layer . get_weights ( ) [ 3 ] . T ) W_x . append ( keras_layer . get_weights ( ) [ 6 ] . T ) W_h . append ( keras_layer . get_weights ( ) [ 1 ] . T ) W_h . append ( keras_layer . get_weights ( ) [ 4 ] . T ) W_h . append ( keras_layer . get_weights ( ) [ 7 ] . T ) b . append ( keras_layer . get_weights ( ) [ 2 ] ) b . append ( keras_layer . get_weights ( ) [ 5 ] ) b . append ( keras_layer . get_weights ( ) [ 8 ] ) else : print ( 'consume less not implemented' ) # Set actication type inner_activation_str = _get_recurrent_activation_name_from_keras ( keras_layer . inner_activation ) activation_str = _get_recurrent_activation_name_from_keras ( keras_layer . activation ) # Add to the network builder . add_gru ( name = layer , W_h = W_h , W_x = W_x , b = b , input_size = input_size , hidden_size = hidden_size , input_names = input_names , output_names = output_names , activation = activation_str , inner_activation = inner_activation_str , output_all = output_all , reverse_input = reverse_input )
def _g_3(self):
    """omega3 < omega < omega4"""
    # return 3 * (1.0 - self._n_3()) / (self._vertices_omegas[3] - self._omega)
    return (3 * self._f(1, 3) * self._f(2, 3) /
            (self._vertices_omegas[3] - self._vertices_omegas[0]))
def list_accounts_add(self, id, account_ids):
    """Add the account(s) given in `account_ids` to the list."""
    id = self.__unpack_id(id)

    if not isinstance(account_ids, list):
        account_ids = [account_ids]
    account_ids = list(map(lambda x: self.__unpack_id(x), account_ids))

    params = self.__generate_params(locals(), ['id'])
    self.__api_request('POST', '/api/v1/lists/{0}/accounts'.format(id), params)
def main ( args = None ) : """Entry point for ` fuel - convert ` script . This function can also be imported and used from Python . Parameters args : iterable , optional ( default : None ) A list of arguments that will be passed to Fuel ' s conversion utility . If this argument is not specified , ` sys . argv [ 1 : ] ` will be used ."""
built_in_datasets = dict ( converters . all_converters ) if fuel . config . extra_converters : for name in fuel . config . extra_converters : extra_datasets = dict ( importlib . import_module ( name ) . all_converters ) if any ( key in built_in_datasets for key in extra_datasets . keys ( ) ) : raise ValueError ( 'extra converters conflict in name with ' 'built-in converters' ) built_in_datasets . update ( extra_datasets ) parser = argparse . ArgumentParser ( description = 'Conversion script for built-in datasets.' ) subparsers = parser . add_subparsers ( ) parent_parser = argparse . ArgumentParser ( add_help = False ) parent_parser . add_argument ( "-d" , "--directory" , help = "directory in which input files reside" , type = str , default = os . getcwd ( ) ) convert_functions = { } for name , fill_subparser in built_in_datasets . items ( ) : subparser = subparsers . add_parser ( name , parents = [ parent_parser ] , help = 'Convert the {} dataset' . format ( name ) ) subparser . add_argument ( "-o" , "--output-directory" , help = "where to save the dataset" , type = str , default = os . getcwd ( ) , action = CheckDirectoryAction ) subparser . add_argument ( "-r" , "--output_filename" , help = "new name of the created dataset" , type = str , default = None ) # Allows the parser to know which subparser was called . subparser . set_defaults ( which_ = name ) convert_functions [ name ] = fill_subparser ( subparser ) args = parser . parse_args ( args ) args_dict = vars ( args ) if args_dict [ 'output_filename' ] is not None and os . path . splitext ( args_dict [ 'output_filename' ] ) [ 1 ] not in ( '.hdf5' , '.hdf' , '.h5' ) : args_dict [ 'output_filename' ] += '.hdf5' if args_dict [ 'output_filename' ] is None : args_dict . pop ( 'output_filename' ) convert_function = convert_functions [ args_dict . pop ( 'which_' ) ] try : output_paths = convert_function ( ** args_dict ) except MissingInputFiles as e : intro = "The following required files were not found:\n" message = "\n" . join ( [ intro ] + [ " * " + f for f in e . filenames ] ) message += "\n\nDid you forget to run fuel-download?" parser . error ( message ) # Tag the newly - created file ( s ) with H5PYDataset version and command - line # options for output_path in output_paths : h5file = h5py . File ( output_path , 'a' ) interface_version = H5PYDataset . interface_version . encode ( 'utf-8' ) h5file . attrs [ 'h5py_interface_version' ] = interface_version fuel_convert_version = converters . __version__ . encode ( 'utf-8' ) h5file . attrs [ 'fuel_convert_version' ] = fuel_convert_version command = [ os . path . basename ( sys . argv [ 0 ] ) ] + sys . argv [ 1 : ] h5file . attrs [ 'fuel_convert_command' ] = ( ' ' . join ( command ) . encode ( 'utf-8' ) ) h5file . flush ( ) h5file . close ( )
def A(self):
    r"""Spectral VAR coefficients.

    .. math:: \mathbf{A}(f) = \mathbf{I} - \sum_{k=1}^{p} \mathbf{a}^{(k)}
                              \mathrm{e}^{-2\pi f}
    """
    return fft(np.dstack([np.eye(self.m), -self.b]),
               self.nfft * 2 - 1)[:, :, :self.nfft]
def get_application_instance():
    """Returns the current `QApplication <http://doc.qt.nokia.com/qapplication.html>`_
    instance or creates one if it doesn't exist.

    :return: Application instance.
    :rtype: QApplication
    """
    instance = QApplication.instance()
    if not instance:
        instance = QApplication(sys.argv)
    return instance
def process_input ( input , output = None , ivmlist = None , updatewcs = True , prodonly = False , wcskey = None , ** workinplace ) : """Create the full input list of filenames after verifying and converting files as needed ."""
newfilelist , ivmlist , output , oldasndict , origflist = buildFileListOrig ( input , output = output , ivmlist = ivmlist , wcskey = wcskey , updatewcs = updatewcs , ** workinplace ) if not newfilelist : buildEmptyDRZ ( input , output ) return None , None , output # run all WCS updating - - Now done in buildFileList # pydr _ input = _ process _ input _ wcs ( newfilelist , wcskey , updatewcs ) pydr_input = newfilelist # AsnTable will handle the case when output = = None if not oldasndict : # and output is not None : oldasndict = asnutil . ASNTable ( pydr_input , output = output ) oldasndict . create ( ) asndict = update_member_names ( oldasndict , pydr_input ) asndict [ 'original_file_names' ] = origflist # Build output filename drz_extn = '_drz.fits' for img in newfilelist : # special case logic to automatically recognize when _ flc . fits files # are provided as input and produce a _ drc . fits file instead if '_flc.fits' in img : drz_extn = '_drc.fits' break if output in [ None , '' ] : output = fileutil . buildNewRootname ( asndict [ 'output' ] , extn = drz_extn ) else : if '.fits' in output . lower ( ) : pass elif drz_extn [ : 4 ] not in output . lower ( ) : output = fileutil . buildNewRootname ( output , extn = drz_extn ) log . info ( 'Setting up output name: %s' % output ) return asndict , ivmlist , output
def authenticate(func, c, expose_request=False):
    """A decorator that facilitates authentication per method. Setting
    C{expose_request} to C{True} will set the underlying request object (if
    there is one), usually HTTP, and set it to the first argument of the
    authenticating callable. If there is no request object, the default is
    C{None}.

    @raise TypeError: C{func} and authenticator must be callable.
    """
    if not python.callable(func):
        raise TypeError('func must be callable')

    if not python.callable(c):
        raise TypeError('Authenticator must be callable')

    attr = func

    if isinstance(func, types.UnboundMethodType):
        attr = func.im_func

    if expose_request is True:
        c = globals()['expose_request'](c)

    setattr(attr, '_pyamf_authenticator', c)

    return func
def run_migrations_online ( ) : """Run migrations in ' online ' mode . In this scenario we need to create an Engine and associate a connection with the context ."""
from uliweb . manage import make_simple_application from uliweb import orm , settings # engine = engine _ from _ config ( # config . get _ section ( config . config _ ini _ section ) , # prefix = ' sqlalchemy . ' , # poolclass = pool . NullPool ) name = config . get_main_option ( "engine_name" ) make_simple_application ( project_dir = '.' ) target_metadata = orm . get_metadata ( name ) connection = orm . get_connection ( name ) . connect ( ) # connection = engine . connect ( ) context . configure ( connection = connection , target_metadata = target_metadata , compare_server_default = True , include_object = uliweb_include_object , # compare _ server _ default = uliweb _ compare _ server _ default , ) try : with context . begin_transaction ( ) : context . run_migrations ( ) finally : connection . close ( )
def create_node ( self , name , project_id , node_id , * args , ** kwargs ) : """Create a new node : param name : Node name : param project _ id : Project identifier : param node _ id : restore a node identifier"""
if node_id in self . _nodes : return self . _nodes [ node_id ] project = ProjectManager . instance ( ) . get_project ( project_id ) if node_id and isinstance ( node_id , int ) : # old project with ( yield from BaseManager . _convert_lock ) : node_id = yield from self . convert_old_project ( project , node_id , name ) if not node_id : node_id = str ( uuid4 ( ) ) node = self . _NODE_CLASS ( name , node_id , project , self , * args , ** kwargs ) if asyncio . iscoroutinefunction ( node . create ) : yield from node . create ( ) else : node . create ( ) self . _nodes [ node . id ] = node project . add_node ( node ) return node
def get_as_list ( self , tag_name ) : """Return the value of a tag , making sure that it's a list . Absent tags are returned as an empty list ; single tags are returned as a one-element list . The returned list is a copy , and modifications do not affect the original object ."""
val = self . get ( tag_name , [ ] ) if isinstance ( val , list ) : return val [ : ] else : return [ val ]
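A standalone mirror of the same logic on a plain dict, just to illustrate the three cases:
def _get_as_list_demo(obj, tag_name):
    # mirrors the method above, for illustration only
    val = obj.get(tag_name, [])
    return val[:] if isinstance(val, list) else [val]

record = {'authors': ['a', 'b'], 'title': 'x'}
assert _get_as_list_demo(record, 'authors') == ['a', 'b']   # copy of the list
assert _get_as_list_demo(record, 'title') == ['x']          # single value wrapped
assert _get_as_list_demo(record, 'missing') == []           # absent tag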
def read_node_label_matrix ( file_path , separator , number_of_nodes ) : """Reads node - label pairs in csv format and returns a list of tuples and a node - label matrix . Inputs : - file _ path : The path where the node - label matrix is stored . - separator : The delimiter among values ( e . g . " , " , " \t " , " " ) - number _ of _ nodes : The number of nodes of the full graph . It is possible that not all nodes are labelled . Outputs : - node _ label _ matrix : The node - label associations in a NumPy array of tuples format . - number _ of _ categories : The number of categories / classes the nodes may belong to . - labelled _ node _ indices : A NumPy array containing the labelled node indices ."""
# Open file file_row_generator = get_file_row_generator ( file_path , separator ) # Initialize lists for row and column sparse matrix arguments row = list ( ) col = list ( ) append_row = row . append append_col = col . append # Populate the arrays for file_row in file_row_generator : node = np . int64 ( file_row [ 0 ] ) label = np . int64 ( file_row [ 1 ] ) # Add label append_row ( node ) append_col ( label ) number_of_categories = len ( set ( col ) ) # I assume that there are no missing labels . There may be missing nodes . labelled_node_indices = np . array ( list ( set ( row ) ) ) row = np . array ( row , dtype = np . int64 ) col = np . array ( col , dtype = np . int64 ) data = np . ones_like ( row , dtype = np . float64 ) # Array count should start from 0. row -= 1 col -= 1 labelled_node_indices -= 1 # Form sparse adjacency matrix node_label_matrix = sparse . coo_matrix ( ( data , ( row , col ) ) , shape = ( number_of_nodes , number_of_categories ) ) node_label_matrix = node_label_matrix . tocsr ( ) return node_label_matrix , number_of_categories , labelled_node_indices
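A self-contained sketch of the same indexing convention: node and label ids in the file are 1-based, while the resulting CSR matrix is 0-based.
import numpy as np
from scipy import sparse

rows = np.array([1, 2, 2, 5], dtype=np.int64) - 1   # node ids as read from the file
cols = np.array([1, 1, 3, 2], dtype=np.int64) - 1   # label ids as read from the file
data = np.ones_like(rows, dtype=np.float64)
mat = sparse.coo_matrix((data, (rows, cols)), shape=(5, 3)).tocsr()
assert mat[4, 1] == 1.0   # node 5 carries label 2 in the file's numbering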
def get_label ( self , label_id , request_data , project = None ) : """GetLabel . Get a single deep label . : param str label _ id : Unique identifier of label : param : class : ` < TfvcLabelRequestData > < azure . devops . v5_0 . tfvc . models . TfvcLabelRequestData > ` request _ data : maxItemCount : param str project : Project ID or project name : rtype : : class : ` < TfvcLabel > < azure . devops . v5_0 . tfvc . models . TfvcLabel > `"""
route_values = { } if project is not None : route_values [ 'project' ] = self . _serialize . url ( 'project' , project , 'str' ) if label_id is not None : route_values [ 'labelId' ] = self . _serialize . url ( 'label_id' , label_id , 'str' ) query_parameters = { } if request_data is not None : if request_data . label_scope is not None : query_parameters [ 'requestData.labelScope' ] = request_data . label_scope if request_data . name is not None : query_parameters [ 'requestData.name' ] = request_data . name if request_data . owner is not None : query_parameters [ 'requestData.owner' ] = request_data . owner if request_data . item_label_filter is not None : query_parameters [ 'requestData.itemLabelFilter' ] = request_data . item_label_filter if request_data . max_item_count is not None : query_parameters [ 'requestData.maxItemCount' ] = request_data . max_item_count if request_data . include_links is not None : query_parameters [ 'requestData.includeLinks' ] = request_data . include_links response = self . _send ( http_method = 'GET' , location_id = 'a5d9bd7f-b661-4d0e-b9be-d9c16affae54' , version = '5.0' , route_values = route_values , query_parameters = query_parameters ) return self . _deserialize ( 'TfvcLabel' , response )
def parse_at_element ( self , element , state ) : """Parse the provided element as an aggregate . : type element : ET.Element : type state : _ProcessorState : rtype : Any"""
parsed_dict = self . _dictionary . parse_at_element ( element , state ) return self . _converter . from_dict ( parsed_dict )
def ErrorMsg ( ) : """Helper to get a nice traceback as string"""
import traceback limit = None _type , value , tb = sys . exc_info ( ) _list = traceback . format_tb ( tb , limit ) + traceback . format_exception_only ( _type , value ) return "Traceback (innermost last):\n" + "%-20s %s" % ( " " . join ( _list [ : - 1 ] ) , _list [ - 1 ] )
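Typical use is inside an exception handler, where the formatted traceback string is logged or printed:
try:
    1 / 0
except ZeroDivisionError:
    print(ErrorMsg())   # "Traceback (innermost last): ..." followed by the formatted frames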
def to_string ( self , buf = None , columns = None , col_space = None , header = True , index = True , na_rep = 'NaN' , formatters = None , float_format = None , sparsify = None , index_names = True , justify = None , max_rows = None , max_cols = None , show_dimensions = False , decimal = '.' , line_width = None ) : """Render a DataFrame to a console-friendly tabular output . %(shared_params)s line_width : int , optional Width to wrap a line in characters . %(returns)s See Also to_html : Convert DataFrame to HTML . Examples >>> d = { 'col1' : [ 1 , 2 , 3 ] , 'col2' : [ 4 , 5 , 6 ] } >>> df = pd . DataFrame ( d ) >>> print ( df . to_string ( ) ) col1 col2 0 1 4 1 2 5 2 3 6"""
formatter = fmt . DataFrameFormatter ( self , buf = buf , columns = columns , col_space = col_space , na_rep = na_rep , formatters = formatters , float_format = float_format , sparsify = sparsify , justify = justify , index_names = index_names , header = header , index = index , max_rows = max_rows , max_cols = max_cols , show_dimensions = show_dimensions , decimal = decimal , line_width = line_width ) formatter . to_string ( ) if buf is None : result = formatter . buf . getvalue ( ) return result
def has_local_job_refs ( io_hash ) : ''': param io _ hash : input / output hash : type io _ hash : dict : returns : boolean indicating whether any job - based object references are found in * io _ hash *'''
q = [ ] for field in io_hash : if is_job_ref ( io_hash [ field ] ) : if get_job_from_jbor ( io_hash [ field ] ) . startswith ( 'localjob' ) : return True elif isinstance ( io_hash [ field ] , list ) or isinstance ( io_hash [ field ] , dict ) : q . append ( io_hash [ field ] ) while len ( q ) > 0 : thing = q . pop ( ) if isinstance ( thing , list ) : for i in range ( len ( thing ) ) : if is_job_ref ( thing [ i ] ) : if get_job_from_jbor ( thing [ i ] ) . startswith ( 'localjob' ) : return True elif isinstance ( thing [ i ] , list ) or isinstance ( thing [ i ] , dict ) : q . append ( thing [ i ] ) else : for field in thing : if is_job_ref ( thing [ field ] ) : if get_job_from_jbor ( thing [ field ] ) . startswith ( 'localjob' ) : return True elif isinstance ( thing [ field ] , list ) or isinstance ( thing [ field ] , dict ) : q . append ( thing [ field ] ) return False
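A hypothetical input shape, assuming the usual job-based object reference (JBOR) layout ``{"job": ..., "field": ...}`` recognised by is_job_ref / get_job_from_jbor:
# io_hash = {
#     "reads": [{"job": "localjob-1", "field": "out"}],   # nested local reference
#     "quality": 30,
# }
# has_local_job_refs(io_hash)  ->  True, because the nested reference points at "localjob-1"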
def _display_status ( normalized_data , stream ) : """print status message from docker - py stream ."""
if 'Pull complete' in normalized_data [ 'status' ] or 'Download complete' in normalized_data [ 'status' ] : stream . write ( "\n" ) if 'id' in normalized_data : stream . write ( "%s - " % normalized_data [ 'id' ] ) stream . write ( "{0}\n" . format ( normalized_data [ 'status' ] ) )
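An illustration with two parsed docker-py status lines written to standard output:
import sys

_display_status({'id': 'abc123', 'status': 'Downloading'}, sys.stdout)
# -> "abc123 - Downloading"
_display_status({'id': 'abc123', 'status': 'Pull complete'}, sys.stdout)
# -> a blank line, then "abc123 - Pull complete"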
def get_sqlview ( self , uid , execute = False , var = None , criteria = None , merge = False ) : """GET SQL View data : param uid : sqlView UID : param execute : materialize sqlView before downloading its data : param var : for QUERY types , a dict of variables to query the sqlView : param criteria : for VIEW / MATERIALIZED_VIEW types , a dict of criteria to filter the sqlView : param merge : If true , return a list containing all pages instead of one page . Defaults to False . : return : a list OR generator where __next__ is a 'row' of the SQL View"""
params = { } sqlview_type = self . get ( 'sqlViews/{}' . format ( uid ) , params = { 'fields' : 'type' } ) . json ( ) . get ( 'type' ) if sqlview_type == 'QUERY' : if not isinstance ( var , dict ) : raise ClientException ( "Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}" ) var = [ '{}:{}' . format ( k , v ) for k , v in var . items ( ) ] params [ 'var' ] = var if execute : raise ClientException ( "SQL view of type QUERY, no view to create (no execute=True)" ) else : # MATERIALIZED _ VIEW / VIEW if criteria : if not isinstance ( criteria , dict ) : raise ClientException ( "Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }" ) criteria = [ '{}:{}' . format ( k , v ) for k , v in criteria . items ( ) ] params [ 'criteria' ] = criteria if execute : # materialize self . post ( 'sqlViews/{}/execute' . format ( uid ) ) def page_generator ( ) : with closing ( self . get ( 'sqlViews/{}/data' . format ( uid ) , file_type = 'csv' , params = params , stream = True ) ) as r : # do not need to use unicodecsv . DictReader as data comes in bytes already reader = DictReader ( codecs . iterdecode ( r . iter_lines ( ) , 'utf-8' ) , delimiter = ',' , quotechar = '"' ) for row in reader : yield row if not merge : return page_generator ( ) else : return list ( page_generator ( ) )
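A usage sketch; the sqlView UIDs below are placeholders, and ``api`` stands for an instance of the client class this method belongs to:
# rows = api.get_sqlview('dI68mLkP1wN', execute=True,
#                        criteria={'name': 'district'})   # VIEW / MATERIALIZED_VIEW
# for row in rows:                                        # generator of CSV rows as dicts
#     print(row)
# query_rows = api.get_sqlview('qMYMT0iUGkG', var={'valueType': 'INTEGER'}, merge=True)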
def get_most_distinct_words ( vocab , topic_word_distrib , doc_topic_distrib , doc_lengths , n = None ) : """Order the words from ` vocab ` by " distinctiveness score " ( Chuang et al . 2012 ) from most to least distinctive . Optionally only return the ` n ` most distinctive words . J . Chuang , C . Manning , J . Heer 2012 : " Termite : Visualization Techniques for Assessing Textual Topic Models " """
return _words_by_distinctiveness_score ( vocab , topic_word_distrib , doc_topic_distrib , doc_lengths , n )
def configure ( self , accountID , ** kwargs ) : """Set the client - configurable portions of an Account . Args : accountID : Account Identifier alias : Client - defined alias ( name ) for the Account marginRate : The string representation of a decimal number . Returns : v20 . response . Response containing the results from submitting the request"""
request = Request ( 'PATCH' , '/v3/accounts/{accountID}/configuration' ) request . set_path_param ( 'accountID' , accountID ) body = EntityDict ( ) if 'alias' in kwargs : body . set ( 'alias' , kwargs [ 'alias' ] ) if 'marginRate' in kwargs : body . set ( 'marginRate' , kwargs [ 'marginRate' ] ) request . set_body_dict ( body . dict ) response = self . ctx . request ( request ) if response . content_type is None : return response if not response . content_type . startswith ( "application/json" ) : return response jbody = json . loads ( response . raw_body ) parsed_body = { } # Parse responses as defined by the API specification if str ( response . status ) == "200" : if jbody . get ( 'clientConfigureTransaction' ) is not None : parsed_body [ 'clientConfigureTransaction' ] = self . ctx . transaction . ClientConfigureTransaction . from_dict ( jbody [ 'clientConfigureTransaction' ] , self . ctx ) if jbody . get ( 'lastTransactionID' ) is not None : parsed_body [ 'lastTransactionID' ] = jbody . get ( 'lastTransactionID' ) elif str ( response . status ) == "400" : if jbody . get ( 'clientConfigureRejectTransaction' ) is not None : parsed_body [ 'clientConfigureRejectTransaction' ] = self . ctx . transaction . ClientConfigureRejectTransaction . from_dict ( jbody [ 'clientConfigureRejectTransaction' ] , self . ctx ) if jbody . get ( 'lastTransactionID' ) is not None : parsed_body [ 'lastTransactionID' ] = jbody . get ( 'lastTransactionID' ) if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "403" : if jbody . get ( 'clientConfigureRejectTransaction' ) is not None : parsed_body [ 'clientConfigureRejectTransaction' ] = self . ctx . transaction . ClientConfigureRejectTransaction . from_dict ( jbody [ 'clientConfigureRejectTransaction' ] , self . ctx ) if jbody . get ( 'lastTransactionID' ) is not None : parsed_body [ 'lastTransactionID' ] = jbody . get ( 'lastTransactionID' ) if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "401" : if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "404" : if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) elif str ( response . status ) == "405" : if jbody . get ( 'errorCode' ) is not None : parsed_body [ 'errorCode' ] = jbody . get ( 'errorCode' ) if jbody . get ( 'errorMessage' ) is not None : parsed_body [ 'errorMessage' ] = jbody . get ( 'errorMessage' ) # Unexpected response status else : parsed_body = jbody response . body = parsed_body return response
def wallet_destroy ( self , wallet ) : """Destroys **wallet** and all contained accounts . .. enable_control required : param wallet : Wallet to destroy : type wallet : str : raises : : py : exc : `nano.rpc.RPCException` >>> rpc . wallet_destroy ( ... wallet = "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) True"""
wallet = self . _process_value ( wallet , 'wallet' ) payload = { "wallet" : wallet } resp = self . call ( 'wallet_destroy' , payload ) return resp == { }
def is_comment_deleted ( comid ) : """Return True if the comment is deleted , else False . : param comid : ID of comment to check"""
query = """SELECT status from "cmtRECORDCOMMENT" WHERE id=%s""" params = ( comid , ) res = run_sql ( query , params ) if res and res [ 0 ] [ 0 ] != 'ok' : return True return False
def milestones ( self ) : '''Array of all milestones'''
if self . cache [ 'milestones' ] : return self . cache [ 'milestones' ] milestone_xml = self . bc . list_milestones ( self . id ) milestones = [ ] for node in ET . fromstring ( milestone_xml ) . findall ( "milestone" ) : milestones . append ( Milestone ( node ) ) milestones . sort ( ) milestones . reverse ( ) self . cache [ 'milestones' ] = milestones return self . cache [ 'milestones' ]
def serial_adapters ( self , serial_adapters ) : """Sets the number of Serial adapters for this IOU VM . : param serial _ adapters : number of adapters"""
self . _serial_adapters . clear ( ) for _ in range ( 0 , serial_adapters ) : self . _serial_adapters . append ( SerialAdapter ( interfaces = 4 ) ) log . info ( 'IOU "{name}" [{id}]: number of Serial adapters changed to {adapters}' . format ( name = self . _name , id = self . _id , adapters = len ( self . _serial_adapters ) ) ) self . _adapters = self . _ethernet_adapters + self . _serial_adapters
def transformation_get ( node_id ) : """Get all the transformations of a node . The node id must be specified in the url . You can also pass transformation _ type ."""
exp = Experiment ( session ) # get the parameters transformation_type = request_parameter ( parameter = "transformation_type" , parameter_type = "known_class" , default = models . Transformation , ) if type ( transformation_type ) == Response : return transformation_type # check the node exists node = models . Node . query . get ( node_id ) if node is None : return error_response ( error_type = "/node/transformations, " "node {} does not exist" . format ( node_id ) ) # execute the request transformations = node . transformations ( type = transformation_type ) try : # ping the experiment exp . transformation_get_request ( node = node , transformations = transformations ) session . commit ( ) except Exception : return error_response ( error_type = "/node/transformations GET failed" , participant = node . participant ) # return the data return success_response ( transformations = [ t . __json__ ( ) for t in transformations ] )
def infer_dtypes ( fit , model = None ) : """Infer dtypes from Stan model code . The function isolates the generated quantities block and searches for `int` dtypes after stripping out comments inside the block ."""
pattern_remove_comments = re . compile ( r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"' , re . DOTALL | re . MULTILINE ) stan_integer = r"int" stan_limits = r"(?:\<[^\>]+\>)*" # ignore group : 0 or more < . . . . > stan_param = r"([^;=\s\[]+)" # capture group : ends = " ; " , " = " , " [ " or whitespace stan_ws = r"\s*" # 0 or more whitespace pattern_int = re . compile ( "" . join ( ( stan_integer , stan_ws , stan_limits , stan_ws , stan_param ) ) , re . IGNORECASE ) if model is None : stan_code = fit . get_stancode ( ) model_pars = fit . model_pars else : stan_code = model . program_code model_pars = fit . param_names # remove deprecated comments stan_code = "\n" . join ( line if "#" not in line else line [ : line . find ( "#" ) ] for line in stan_code . splitlines ( ) ) stan_code = re . sub ( pattern_remove_comments , "" , stan_code ) stan_code = stan_code . split ( "generated quantities" ) [ - 1 ] dtypes = re . findall ( pattern_int , stan_code ) dtypes = { item . strip ( ) : "int" for item in dtypes if item . strip ( ) in model_pars } return dtypes
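A small sketch with a fake fit object, enough to exercise the comment stripping and the ``int`` lookup (the class and parameter names are made up for the illustration):
class _FakeFit:
    model_pars = ['n_eff', 'theta']
    def get_stancode(self):
        return """
        parameters { real theta; }
        generated quantities {
            int n_eff;      // declared int, so picked up as an integer dtype
            real y_hat;
        }
        """

assert infer_dtypes(_FakeFit()) == {'n_eff': 'int'}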
def query ( self , event , pk , ts = None ) : """Query the last update timestamp of an event pk . You can pass a timestamp to only look for events later than that within the same namespace . : param event : the event name . : param pk : the pk value for query . : param ts : query event pk after ts , default to None which will query all span of current namespace ."""
key = self . _keygen ( event , ts ) pk_ts = self . r . zscore ( key , pk ) return int ( pk_ts ) if pk_ts else None
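A hypothetical round trip, assuming the companion write path stores pk -> timestamp with ZADD under the same ``_keygen`` scheme:
# es.query('order_paid', 42)   # -> e.g. 1514764800 if pk 42 was recorded in this namespace
# es.query('order_paid', 99)   # -> None, never recorded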
def t_php_STRING ( t ) : r'[A-Za-z_][\w_]*'
t . type = reserved_map . get ( t . value . upper ( ) , 'STRING' ) return t
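PLY takes the token regex from the function's docstring; the body then promotes reserved words to their own token types. A hypothetical ``reserved_map`` excerpt shows the effect:
# reserved_map = {'ECHO': 'ECHO', 'IF': 'IF', ...}   # hypothetical excerpt
# lexing "echo"  -> t.type == 'ECHO'
# lexing "myVar" -> t.type == 'STRING'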
def prepare_ec ( oo , sizes , M ) : """This prepares EC and converts from contig _ id to an index ."""
tour = range ( len ( oo ) ) tour_sizes = np . array ( [ sizes . sizes [ x ] for x in oo ] ) tour_M = M [ oo , : ] [ : , oo ] return tour , tour_sizes , tour_M
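A minimal sketch with a stand-in ``sizes`` object: ``oo`` holds integer contig ids, ``sizes.sizes`` maps id to length, and ``M`` is re-ordered consistently on both axes.
import numpy as np

class _Sizes:
    sizes = {0: 10, 1: 30, 2: 20}   # contig id -> length, stand-in for the real object

oo = [2, 0, 1]
M = np.arange(9).reshape(3, 3)
tour, tour_sizes, tour_M = prepare_ec(oo, _Sizes(), M)
# tour       -> range(3): positions 0..2 replace the original contig ids
# tour_sizes -> array([20, 10, 30])
# tour_M     -> M with rows and columns re-ordered to [2, 0, 1]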
async def reconnect ( self , force = True , connmark = None ) : '''Can call without delegate'''
if connmark is None : connmark = self . connmark self . scheduler . emergesend ( ConnectionControlEvent ( self , ConnectionControlEvent . RECONNECT , force , connmark ) )
def perform_permissions_check ( self , user , obj , perms ) : """Performs the permissions check ."""
return self . request . forum_permission_handler . can_update_topics_to_announces ( obj , user )
def binaryFiles ( self , path , minPartitions = None ) : """.. note:: Experimental Read a directory of binary files from HDFS , a local file system ( available on all nodes ) , or any Hadoop-supported file system URI as a byte array . Each file is read as a single record and returned in a key-value pair , where the key is the path of each file and the value is the content of each file . .. note:: Small files are preferred ; large files are also allowed , but may cause bad performance ."""
minPartitions = minPartitions or self . defaultMinPartitions return RDD ( self . _jsc . binaryFiles ( path , minPartitions ) , self , PairDeserializer ( UTF8Deserializer ( ) , NoOpSerializer ( ) ) )
def add_node_ids_as_labels ( discoursegraph ) : """Adds the ID of each node of a discourse graph as a label ( an attribute named ` ` label ` ` with the value of the node ID ) to itself . This will ignore nodes whose ID isn ' t a string or which already have a label attribute . Parameters discoursegraph : DiscourseDocumentGraph"""
for node_id , properties in discoursegraph . nodes_iter ( data = True ) : if 'label' not in properties and isinstance ( node_id , ( str , unicode ) ) : discoursegraph . node [ node_id ] [ 'label' ] = ensure_utf8 ( node_id )
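A behaviour sketch, matching the Python 2 / networkx 1.x API used above:
# g.add_node('token_1')                  -> node gets label 'token_1'
# g.add_node('token_2', label='word')    -> existing label is kept
# g.add_node(42)                         -> skipped, the id is not a string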
def list_all_products ( cls , ** kwargs ) : """List Products Return a list of Products This method makes a synchronous HTTP request by default . To make an asynchronous HTTP request , please pass async = True > > > thread = api . list _ all _ products ( async = True ) > > > result = thread . get ( ) : param async bool : param int page : page number : param int size : page size : param str sort : page order : return : page [ Product ] If the method is called asynchronously , returns the request thread ."""
kwargs [ '_return_http_data_only' ] = True if kwargs . get ( 'async' ) : return cls . _list_all_products_with_http_info ( ** kwargs ) else : ( data ) = cls . _list_all_products_with_http_info ( ** kwargs ) return data
def setrange ( self , name , offset , value ) : """Overwrite bytes in the value of ` ` name ` ` starting at ` ` offset ` ` with ` ` value ` ` . If ` ` offset ` ` plus the length of ` ` value ` ` exceeds the length of the original value , the new value will be larger than before . If ` ` offset ` ` exceeds the length of the original value , null bytes will be used to pad between the end of the previous value and the start of what ' s being injected . Returns the length of the new string ."""
return self . execute_command ( 'SETRANGE' , name , offset , value )
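A usage sketch against a redis-py client, showing the standard SETRANGE semantics (a running Redis server is assumed):
import redis

r = redis.Redis()
r.set('key1', 'Hello World')
assert r.setrange('key1', 6, 'Redis') == 11   # value is now b'Hello Redis'
assert r.setrange('key2', 5, 'World') == 10   # missing prefix padded with null bytes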
def ramp_up_sp ( self ) : """Writing sets the ramp-up setpoint . Reading returns the current value . Units are in milliseconds and must be positive . When set to a non-zero value , the motor speed will increase from 0 to 100% of `max_speed` over the span of this setpoint . The actual ramp time is `ramp_up_sp` multiplied by the ratio of the difference between `speed_sp` and the current `speed` to `max_speed` ."""
self . _ramp_up_sp , value = self . get_attr_int ( self . _ramp_up_sp , 'ramp_up_sp' ) return value
def get_match ( self , match_id ) : """Get a multiplayer match . Parameters match_id The ID of the match to retrieve . This is the ID that you see in an online multiplayer match summary . It does not correspond to the in-game game ID ."""
return self . _make_req ( endpoints . MATCH , dict ( k = self . key , mp = match_id ) , Match )