def aggregate(self, search):
    """Add aggregations representing the facets selected, including potential filters."""
    for f, facet in iteritems(self.facets):
        agg = facet.get_aggregation()
        agg_filter = MatchAll()
        for field, filter in iteritems(self._filters):
            if f == field:
                continue
            agg_filter &= filter
        search.aggs.bucket(
            '_filter_' + f, 'filter', filter=agg_filter
        ).bucket(f, agg)
def create_preauth(byval, key, by='name', expires=0, timestamp=None):
    """Generates a zimbra preauth value

    :param byval: The value of the targeted user (according to the by-parameter).
        For example: The account name, if "by" is "name".
    :param key: The domain preauth key (you can retrieve that using zmprov gd)
    :param by: What type is the byval-parameter? Valid parameters are "name"
        (default), "id" and "foreignPrincipal"
    :param expires: Milliseconds when the auth token expires. Defaults to 0
        for default account expiration
    :param timestamp: Current timestamp (is calculated by default)
    :returns: The preauth value to be used in an AuthRequest
    :rtype: str
    """
    if timestamp is None:
        timestamp = int(datetime.now().strftime("%s")) * 1000
    pak = hmac.new(
        codecs.latin_1_encode(key)[0],
        ('%s|%s|%s|%s' % (byval, by, expires, timestamp)).encode("utf-8"),
        hashlib.sha1
    ).hexdigest()
    return pak
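# A minimal usage sketch; the key below is a placeholder, not a real domain
# preauth key, and the module-level imports (hmac, hashlib, codecs, datetime)
# are assumed to be present.
preauth_key = "0123456789abcdef0123456789abcdef"   # placeholder zmprov key
value = create_preauth("user@example.com", preauth_key)
print(value)   # 40-character hex HMAC-SHA1 digest usable in an AuthRequest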
def _resolve_parameters(parameters, blueprint):
    """Resolves CloudFormation Parameters for a given blueprint.

    Given a list of parameters, handles:
        - discard any parameters that the blueprint does not use
        - discard any empty values
        - convert booleans to strings suitable for CloudFormation

    Args:
        parameters (dict): A dictionary of parameters provided by the
            stack definition
        blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint
            object that is having the parameters applied to it.

    Returns:
        dict: The resolved parameters.
    """
    params = {}
    param_defs = blueprint.get_parameter_definitions()

    for key, value in parameters.items():
        if key not in param_defs:
            logger.debug("Blueprint %s does not use parameter %s.",
                         blueprint.name, key)
            continue
        if value is None:
            logger.debug("Got None value for parameter %s, not submitting it "
                         "to cloudformation, default value should be used.",
                         key)
            continue
        if isinstance(value, bool):
            logger.debug("Converting parameter %s boolean \"%s\" to string.",
                         key, value)
            value = str(value).lower()
        params[key] = value
    return params
def normalize_query(query):
    """Normalize query: sort params by name, remove params without value.

    >>> normalize_query('z=3&y=&x=1')
    'x=1&z=3'
    """
    if query == '' or len(query) <= 2:
        return ''
    nquery = unquote(query, exceptions=QUOTE_EXCEPTIONS['query'])
    params = nquery.split('&')
    nparams = []
    for param in params:
        if '=' in param:
            k, v = param.split('=', 1)
            if k and v:
                nparams.append("%s=%s" % (k, v))
    nparams.sort()
    return '&'.join(nparams)
def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
    """Extract data from Transmission's resume folder files.

    This is the main parsing engine for the parser. It determines if
    the selected file is the proper file to parse and extracts current
    running torrents.

    Transmission stores an individual Bencoded file for each active download
    in a folder named resume under the user's application data folder.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      data (Optional[dict[str, object]]): bencode data values.
    """
    seeding_time = data.get('seeding-time-seconds', None)

    event_data = TransmissionEventData()
    event_data.destination = data.get('destination', None)
    # Convert seconds to minutes.
    event_data.seedtime, _ = divmod(seeding_time, 60)

    # Create timeline events based on extracted values.
    timestamp = data.get('added-date', None)
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = data.get('done-date', None)
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = data.get('activity-date', None)
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def check_if_outcome_already_connected(self, from_state_id, from_outcome):
    """Check if the outcome of the from state is not already connected

    :param from_state_id: The source state of the transition
    :param from_outcome: The outcome of the source state to connect the
        transition to
    :raises exceptions.AttributeError: if the outcome of the state with
        state_id == from_state_id is already connected
    """
    for trans_key, transition in self.transitions.items():
        if transition.from_state == from_state_id:
            if transition.from_outcome == from_outcome:
                raise AttributeError(
                    "Outcome %s of state %s is already connected" %
                    (str(from_outcome), str(from_state_id)))
def cli(env, identifier, allocation, port, routing_type, routing_method):
    """Edit an existing load balancer service group."""
    mgr = SoftLayer.LoadBalancerManager(env.client)
    loadbal_id, group_id = loadbal.parse_id(identifier)

    # check if any input is provided
    if not any([allocation, port, routing_type, routing_method]):
        raise exceptions.CLIAbort(
            'At least one property is required to be changed!')

    mgr.edit_service_group(loadbal_id,
                           group_id,
                           allocation=allocation,
                           port=port,
                           routing_type=routing_type,
                           routing_method=routing_method)

    env.fout('Load balancer service group %s is being updated!' % identifier)
def get_outfilename(url, domain=None):
    """Construct the output filename from domain and end of path."""
    if domain is None:
        domain = get_domain(url)

    path = '{url.path}'.format(url=urlparse(url))
    if '.' in path:
        tail_url = path.split('.')[-2]
    else:
        tail_url = path

    if tail_url:
        if '/' in tail_url:
            tail_pieces = [x for x in tail_url.split('/') if x]
            tail_url = tail_pieces[-1]

        # Keep length of return string below or equal to max_len
        max_len = 24
        if domain:
            max_len -= (len(domain) + 1)

        if len(tail_url) > max_len:
            if '-' in tail_url:
                tail_pieces = [x for x in tail_url.split('-') if x]
                tail_url = tail_pieces.pop(0)
                if len(tail_url) > max_len:
                    tail_url = tail_url[:max_len]
                else:
                    # Add as many tail pieces that can fit
                    tail_len = 0
                    for piece in tail_pieces:
                        tail_len += len(piece)
                        if tail_len <= max_len:
                            tail_url += '-' + piece
                        else:
                            break
            else:
                tail_url = tail_url[:max_len]

        if domain:
            return '{0}-{1}'.format(domain, tail_url).lower()
        return tail_url
    return domain.lower()
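# A small usage sketch (hypothetical URL); `get_domain` and `urlparse` are
# assumed to be available from the surrounding module.
name = get_outfilename('https://example.com/blog/long-article-title.html',
                       domain='example.com')
print(name)  # a lowercased 'domain-tail' name built from the URL path,
             # truncated so the result stays near the 24-character budget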
def recompute(self, quiet=False, **kwargs):
    """Re-compute a previously computed model. You might want to do this if
    the kernel parameters change and the kernel is labeled as ``dirty``.

    :param quiet: (optional)
        If ``True``, return false when the computation fails. Otherwise,
        throw an error if something goes wrong. (default: ``False``)
    """
    if not self.computed:
        if not (hasattr(self, "_x") and hasattr(self, "_yerr2")):
            raise RuntimeError("You need to compute the model first")
        try:
            # Update the model making sure that we store the original
            # ordering of the points.
            self.compute(self._x, np.sqrt(self._yerr2), **kwargs)
        except (ValueError, LinAlgError):
            if quiet:
                return False
            raise
    return True
def from_both(cls, tags_file: str, tags_folder: str, folder: str) -> 'TrainData':
    """Load data from both a database and a structured folder"""
    return cls.from_tags(tags_file, tags_folder) + cls.from_folder(folder)
def statistical_inefficiency(X, truncate_acf=True):
    r"""Estimates the statistical inefficiency from univariate time series X

    The statistical inefficiency [1]_ is a measure of the correlatedness of
    samples in a signal. Given a signal :math:`{x_t}` with :math:`N` samples
    and statistical inefficiency :math:`I \in (0,1]`, there are only
    :math:`I \cdot N` effective or uncorrelated samples in the signal. This
    means that :math:`I \cdot N` should be used in order to compute
    statistical uncertainties. See [2]_ for a review.

    The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}`
    using the damped autocorrelation time

    .. math::

        \tau = \frac{1}{2} + \sum_{K=1}^{N} A(k) \left(1 - \frac{k}{N}\right)

    where

    .. math::

        A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x^2 \rangle_t}{\mathrm{var}(x)}

    is the autocorrelation function of the signal :math:`{x_t}`, which is
    computed either for a single or multiple trajectories.

    Parameters
    ----------
    X : float array or list of float arrays
        Univariate time series (single or multiple trajectories)
    truncate_acf : bool, optional, default=True
        When the normalized autocorrelation function passes through 0, it is
        truncated in order to avoid integrating random noise

    References
    ----------
    .. [1] Anderson, T. W.: The Statistical Analysis of Time Series
        (Wiley, New York, 1971)
    .. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations
        and Error Estimation. Quantum Simulations of Complex Many-Body
        Systems: From Theory to Algorithms, Lecture Notes, J. Grotendorst,
        D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for
        Computing, Juelich, NIC Series 10, pp. 423-445, 2002.
    """
    # check input
    assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional'
    N = _maxlength(X)  # length
    # mean-free data
    xflat = np.concatenate(X)
    Xmean = np.mean(xflat)
    X0 = [x - Xmean for x in X]
    # moments
    x2m = np.mean(xflat ** 2)
    # integrate damped autocorrelation
    corrsum = 0.0
    for lag in range(N):
        acf = 0.0
        n = 0.0
        for x in X0:
            Nx = len(x)  # length of this trajectory
            if Nx > lag:  # only use trajectories that are long enough
                acf += np.sum(x[0:Nx - lag] * x[lag:Nx])
                n += float(Nx - lag)
        acf /= n
        if acf <= 0 and truncate_acf:  # zero autocorrelation. Exit
            break
        elif lag > 0:  # start integrating at lag 1 (effect of lag 0 is contained in the 0.5 below)
            corrsum += acf * (1.0 - (float(lag) / float(N)))
    # compute damped correlation time
    corrtime = 0.5 + corrsum / x2m
    # return statistical inefficiency
    return 1.0 / (2 * corrtime)
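# A minimal usage sketch; `_maxlength` is a module-level helper (roughly
# max(len(x) for x in X)), so this only illustrates the expected trend.
import numpy as np

rng = np.random.RandomState(42)
white = [rng.randn(10000)]                                            # uncorrelated noise
smooth = [np.convolve(rng.randn(10000), np.ones(50) / 50.0, 'same')]  # strongly correlated

print(statistical_inefficiency(white))   # close to 1.0
print(statistical_inefficiency(smooth))  # well below 1.0 (few effective samples)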
def fold_string(input_string, max_width):
    """Fold a string within a maximum width.

    Parameters:

      input_string:
        The string of data to go into the cell

      max_width:
        Maximum width of cell. Data is folded into multiple lines to fit
        into this width.

    Return:
        String representing the folded string
    """
    new_string = input_string
    if isinstance(input_string, six.string_types):
        if max_width < len(input_string):
            # use textwrap to fold the string
            new_string = textwrap.fill(input_string, max_width)
    return new_string
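# A short usage sketch; `six` and `textwrap` are imported by the module itself.
folded = fold_string("The quick brown fox jumps over the lazy dog", 16)
print(folded)
# The quick brown
# fox jumps over
# the lazy dog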
def bind(cls, app, *paths, methods=None, name=None, router=None, view=None):
    """Bind to the given application."""
    cls.app = app
    if cls.app is not None:
        for _, m in inspect.getmembers(cls, predicate=inspect.isfunction):
            if not hasattr(m, ROUTE_PARAMS_ATTR):
                continue
            paths_, methods_, name_ = getattr(m, ROUTE_PARAMS_ATTR)
            name_ = name_ or ("%s.%s" % (cls.name, m.__name__))
            delattr(m, ROUTE_PARAMS_ATTR)
            cls.app.register(*paths_, methods=methods_, name=name_, handler=cls)(m)

    @coroutine
    @functools.wraps(cls)
    def handler(request):
        return cls().dispatch(request, view=view)

    if not paths:
        paths = ["/%s" % cls.__name__]

    return routes_register(
        app, handler, *paths, methods=methods, router=router, name=name or cls.name)
def read_module(self, modulename, revision=None, extra={}):
    """Searches for a module named `modulename` in the repository

    The module is just read, and not compiled at all.
    Returns the module if found, and None otherwise
    """
    if modulename not in self.revs:
        # this module doesn't exist in the repos at all
        return None
    elif self.revs[modulename] == []:
        # this module doesn't exist in the repos at all, error reported
        return None

    if revision is not None:
        if (modulename, revision) in self.modules:
            return self.modules[(modulename, revision)]
        self._ensure_revs(self.revs[modulename])
        x = util.keysearch(revision, 1, self.revs[modulename])
        if x is not None:
            (_revision, handle) = x
            if handle is None:
                # this revision doesn't exist in the repos, error reported
                return None
        else:
            # this revision doesn't exist in the repos
            return None
    else:
        # get the latest revision
        (revision, handle) = self._get_latest_rev(self.revs[modulename])
        if (modulename, revision) in self.modules:
            return self.modules[(modulename, revision)]

    if handle[0] == 'parsed':
        module = handle[1]
        return module
    else:
        # get it from the repos
        try:
            r = self.repository.get_module_from_handle(handle)
            (ref, format, text) = r
            if format is None:
                format = util.guess_format(text)
            if format == 'yin':
                p = yin_parser.YinParser(extra)
            else:
                p = yang_parser.YangParser(extra)
            return p.parse(self, ref, text)
        except self.repository.ReadError as ex:
            return None
def set_status(self, status, origin=None, force=False):
    """For programs, to set current status of the actuator. Each active
    program has its status in :attr:`.program_stack` dictionary and the
    highest priority is realized in the actuator.
    """
    if not self.slave and origin not in self.program_stack:
        raise ValueError('Status cannot be changed directly')

    with self._actuator_status_lock:
        self.logger.debug("set_status got through, program: %s", origin)
        self.logger.debug("Set_status %s %s %s", self.name, origin, status)
        if self.slave:
            return self._do_change_status(status, force)

        self.logger.debug("Sets status %s for %s", status, origin.name)
        with self._program_lock:
            self.program_status[origin] = status
            if self.program == origin:
                return self._do_change_status(status, force)
def set_state_process(self, context, process):
    """Method to append process for a context in the IF state.

    :param context: It can be a layer purpose or a section (impact function,
        post processor).
    :type context: str, unicode

    :param process: A text explaining the process.
    :type process: str, unicode
    """
    LOGGER.info('%s: %s' % (context, process))
    self.state[context]["process"].append(process)
def pubkey(self, identity, ecdh=False):
    """Return public key."""
    _verify_support(identity, ecdh)
    return trezor.Trezor.pubkey(self, identity=identity, ecdh=ecdh)
def detect_sentence_ending_saying_verbs(edt_sent_text):
    '''Detects cases where a saying verb (potential root of the sentence)
    ends the sentence.

    We use a simple heuristic: if the given sentence has multiple clauses,
    and the last main verb in the sentence is preceded by " but is not
    followed by ", then the main verb is most likely a saying verb.

    Examples:
        " See oli ainult unes, " [vaidles] Jan.
        " Ma ei maga enam Joogaga ! " [protesteerisin].
        " Mis mõttega te jama suust välja ajate ? " [läks] Janil nüüd juba hari punaseks.

    Note that the class of saying verbs is open, so we try not to rely on a
    listing of verbs, but rather on the conventional usage patterns of
    reported speech, indicated by quotation marks.

    Returns a dict containing word indexes of saying verbs;
    '''
    from estnltk.mw_verbs.utils import WordTemplate

    if not edt_sent_text.is_tagged(VERB_CHAINS):
        edt_sent_text.tag_verb_chains()

    saying_verbs = {}
    if len(edt_sent_text[VERB_CHAINS]) < 2:
        # Skip sentences that do not have any chains, or
        # have only a single verb chain
        return saying_verbs

    patColon = WordTemplate({'partofspeech': '^[Z]$', 'text': '^:$'})

    for vid, vc in enumerate(edt_sent_text[VERB_CHAINS]):
        # Look only at multi-clause sentences, where the last verb chain has length 1
        if len(vc['phrase']) == 1 and vid == len(edt_sent_text[VERB_CHAINS]) - 1:
            wid = vc['phrase'][0]
            token = edt_sent_text[WORDS][wid]
            clause_id = vc[CLAUSE_IDX]
            # Find corresponding clause and locations of quotation marks
            clause, insideEmbeddedCl = _get_clause_words(edt_sent_text, clause_id)
            quoteLeft = _detect_quotes(edt_sent_text, wid, fromRight=False)
            quoteRight = _detect_quotes(edt_sent_text, wid, fromRight=True)
            # Exclude cases where there are double quotes within the same clause:
            #     ... ootab igaüks,] [kuidas aga kähku tagasi "varrastusse" <saaks>.]
            #     ... miljonäre on ka nende seas,] [kes oma “papi” mustas äris <teenivad>.]
            quotes_in_clause = []
            for (wid2, token2) in clause:
                if _pat_starting_quote.match(token2[TEXT]) or _pat_ending_quote.match(token2[TEXT]):
                    quotes_in_clause.append(wid2)
            multipleQuotes = len(quotes_in_clause) > 1 and quotes_in_clause[-1] == quoteLeft
            # If the preceding double quotes are not within the same clause, and
            # the verb is not within an embedded clause, and a quotation mark strictly
            # precedes, but none follows, then we have most likely a saying verb:
            #     " Ma ei tea, " [kehitan] õlga.
            #     " Miks jumal meid karistab? " [mõtles] sir Galahad.
            #     " Kaarsild pole teatavastki elusolend, " [lõpetasin] arutelu.
            if not multipleQuotes and \
               not insideEmbeddedCl and \
               (quoteLeft != -1 and quoteLeft + 1 == wid and quoteRight == -1):
                saying_verbs[wid] = 'se_saying_verb'
    return saying_verbs
def write(u, path):
    """Write a unicode string to a file (as utf-8)."""
    print("writing to: %s" % path)
    # This function implementation was chosen to be compatible across Python 2/3.
    f = open(path, "wb")
    try:
        b = u.encode(FILE_ENCODING)
        f.write(b)
    finally:
        f.close()
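# A quick usage sketch; FILE_ENCODING is a module-level constant (presumably
# "utf-8", as the docstring states).
import os
import tempfile

tmp_path = os.path.join(tempfile.gettempdir(), "demo.txt")
write(u"caf\u00e9 au lait", tmp_path)
with open(tmp_path, "rb") as fh:
    print(fh.read())  # the UTF-8 encoded bytes of the string above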
def create_state_multi_precision(self, index, weight):
    """Creates auxiliary state for a given weight, including FP32 high
    precision copy if original weight is FP16.

    This method is provided to perform automatic mixed precision training
    for optimizers that do not support it themselves.

    Parameters
    ----------
    index : int
        An unique index to identify the weight.
    weight : NDArray
        The weight.

    Returns
    -------
    state : any obj
        The state associated with the weight.
    """
    weight_master_copy = None
    if self.multi_precision and weight.dtype == numpy.float16:
        weight_master_copy = weight.astype(numpy.float32)
        return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
    if weight.dtype == numpy.float16 and not self.multi_precision:
        warnings.warn("Accumulating with float16 in optimizer can lead to "
                      "poor accuracy or slow convergence. "
                      "Consider using multi_precision=True option of the "
                      "optimizer")
    return self.create_state(index, weight)
def mission_clear_all_send(self, target_system, target_component, force_mavlink1=False):
    '''
    Delete all mission items at once.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    '''
    return self.send(
        self.mission_clear_all_encode(target_system, target_component),
        force_mavlink1=force_mavlink1)
def _build_regular_workflow(json_spec):
    """Precondition: json_spec must be validated"""
    workflow_id = dxpy.api.workflow_new(json_spec)["id"]
    dxpy.api.workflow_close(workflow_id)
    return workflow_id
def main(arguments=None):
    """Parse options, gather stats and show the results

    Takes optional parameter ``arguments`` which can be either command line
    string or list of options. This is very useful for testing purposes.
    Function returns a tuple of the form::

        ([user_stats], team_stats)

    with the list of all gathered stats objects.
    """
    try:
        # Parse options, initialize gathered stats
        options, header = Options(arguments).parse()
        gathered_stats = []

        # Check for user email addresses (command line or config)
        emails = options.emails or did.base.Config().email
        emails = utils.split(emails, separator=re.compile(r"\s*,\s*"))
        users = [did.base.User(email=email) for email in emails]

        # Print header and prepare team stats object for data merging
        utils.eprint(header)
        team_stats = UserStats(options=options)
        if options.merge:
            utils.header("Total Report")
            utils.item("Users: {0}".format(len(users)), options=options)

        # Check individual user stats
        for user in users:
            if options.merge:
                utils.item(user, 1, options=options)
            else:
                utils.header(user)
            user_stats = UserStats(user=user, options=options)
            user_stats.check()
            team_stats.merge(user_stats)
            gathered_stats.append(user_stats)

        # Display merged team report
        if options.merge or options.total:
            if options.total:
                utils.header("Total Report")
            team_stats.show()

        # Return all gathered stats objects
        return gathered_stats, team_stats

    except did.base.ConfigFileError as error:
        utils.info(
            "Create at least a minimum config file {0}:\n{1}".format(
                did.base.Config.path(), did.base.Config.example().strip()))
        raise
def remove_outlier(self, data, sd_val):
    """Remove outliers from dataframe.

    Note
    ----
    1. This function excludes all lines with NA in all columns.

    Parameters
    ----------
    data : pd.DataFrame()
        Dataframe to remove outliers from.
    sd_val : int
        Standard Deviation Value (specifies how many SDs away a point has to
        be to be considered an outlier)

    Returns
    -------
    pd.DataFrame()
        Dataframe with outliers removed.
    """
    data = data.dropna()
    data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]
    return data
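# A small usage sketch; `cleaner` stands for a hypothetical instance of the
# class owning remove_outlier, and `np`/`pd` are the usual imports.
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
df = pd.DataFrame({'a': rng.normal(size=100), 'b': rng.normal(size=100)})
df.loc[0, 'a'] = 50.0                            # inject an obvious outlier
cleaned = cleaner.remove_outlier(df, sd_val=3)   # rows with |z-score| >= 3 are dropped
print(len(df), len(cleaned))                     # the injected outlier row is gone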
def escapePlaceholders(self, inputString):
    """This is an internal method that escapes all the placeholders defined
    in MapConstants.py."""
    escaped = inputString.replace(MapConstants.placeholder, '\\' + MapConstants.placeholder)
    escaped = escaped.replace(MapConstants.placeholderFileName, '\\' + MapConstants.placeholderFileName)
    escaped = escaped.replace(MapConstants.placeholderPath, '\\' + MapConstants.placeholderPath)
    escaped = escaped.replace(MapConstants.placeholderExtension, '\\' + MapConstants.placeholderExtension)
    escaped = escaped.replace(MapConstants.placeholderCounter, '\\' + MapConstants.placeholderCounter)
    return escaped
def _format_import_example(self, task_class):
    """Generate nodes that show a code sample demonstrating how to import
    the task class.

    Parameters
    ----------
    task_class : ``lsst.pipe.base.Task``-type
        The Task class.

    Returns
    -------
    nodes : `list` of docutils nodes
        Docutils nodes showing a class import statement.
    """
    code = 'from {0.__module__} import {0.__name__}'.format(task_class)

    # This is a bare-bones version of what Sphinx's code-block directive
    # does. The 'language' attr triggers the pygments treatment.
    literal_node = nodes.literal_block(code, code)
    literal_node['language'] = 'py'

    return [literal_node]
def merge_dicts(a, b):
    """
    Merges the values of B into A and returns a mutated dict A.

    ::

        dict a

        b:
           - c: 0
           - c: 2
        d:
           e: "aaa"
           f: 3

        dict b

        a: 1
        b:
           - c: 3
        d:
           e: "bbb"

    Will give an object such as::

        {'a': 1, 'b': [{'c': 3}], 'd': {'e': "bbb", 'f': 3}}

    :param a: the target dictionary
    :param b: the dictionary to import
    :return: dict
    """
    anyconfig.merge(a, b, ac_merge=MERGE_STRATEGY)

    return a
def after(func):
    """Run a function after the handler is invoked, is passed the response
    and must return a response too.

    Usage::

        >>> # to create a reusable decorator
        >>> @after
        ... def gnu_terry_pratchett(retval):
        ...     retval.setdefault('Headers', {})['X-Clacks-Overhead'] = 'GNU Terry Pratchett'
        ...     return retval
        >>> @gnu_terry_pratchett
        ... def handler(event, context):
        ...     return {'body': ''}
        >>> handler({}, object())
        {'body': '', 'Headers': {'X-Clacks-Overhead': 'GNU Terry Pratchett'}}
    """
    class AfterDecorator(LambdaDecorator):
        def after(self, retval):
            return func(retval)
    return AfterDecorator
def mul_table(self, other):
    """Fast multiplication using the LWNAF precomputation table."""
    # Get a BigInt
    other = coerceBigInt(other)
    if not other:
        return NotImplemented
    other %= orderG2()

    # Build the precomputation table, if there is not one already.
    if not self._table:
        self._table = lwnafTable()
        librelic.ep2_mul_pre_lwnaf(byref(self._table), byref(self))

    result = G2Element()
    librelic.ep2_mul_fix_lwnaf(byref(result), byref(self._table), byref(other))
    return result
def _register_aggregate(agg, con):
    """Register a Python class that performs aggregation in SQLite.

    Parameters
    ----------
    agg : type
    con : sqlalchemy.Connection
    """
    nargs = number_of_arguments(agg.step) - 1  # because self
    con.connection.connection.create_aggregate(agg.__name__, nargs, agg)
async def disable_digital_reporting(self, pin):
    """Disables digital reporting. By turning reporting off for this pin,
    reporting is disabled for all 8 bits in the "port".

    :param pin: Pin and all pins for this port

    :returns: No return value
    """
    port = pin // 8
    command = [PrivateConstants.REPORT_DIGITAL + port,
               PrivateConstants.REPORTING_DISABLE]
    await self._send_command(command)
def annotations(self, qname=True):
    """wrapper that returns all triples for an onto.
    By default resources URIs are transformed into qnames
    """
    if qname:
        return sorted([(uri2niceString(x, self.namespaces)),
                       (uri2niceString(y, self.namespaces)), z]
                      for x, y, z in self.triples)
    else:
        return sorted(self.triples)
def _itemize(objs):
    """Recursive helper function for farray."""
    if not isinstance(objs, collections.Sequence):
        raise TypeError("expected a sequence of Function")

    isseq = [isinstance(obj, collections.Sequence) for obj in objs]
    if not any(isseq):
        ftype = None
        for obj in objs:
            if ftype is None:
                if isinstance(obj, BinaryDecisionDiagram):
                    ftype = BinaryDecisionDiagram
                elif isinstance(obj, Expression):
                    ftype = Expression
                elif isinstance(obj, TruthTable):
                    ftype = TruthTable
                else:
                    raise TypeError("expected valid Function inputs")
            elif not isinstance(obj, ftype):
                raise ValueError("expected uniform Function types")
        return list(objs), ((0, len(objs)), ), ftype
    elif all(isseq):
        items = list()
        shape = None
        ftype = None
        for obj in objs:
            _items, _shape, _ftype = _itemize(obj)
            if shape is None:
                shape = _shape
            elif shape != _shape:
                raise ValueError("expected uniform farray dimensions")
            if ftype is None:
                ftype = _ftype
            elif ftype != _ftype:
                raise ValueError("expected uniform Function types")
            items += _items
        shape = ((0, len(objs)), ) + shape
        return items, shape, ftype
    else:
        raise ValueError("expected uniform farray dimensions")
def strip_codes(s: Union[str, 'ChainedBase']) -> str:
    """Strip all color codes from a string.
    Returns empty string for "falsey" inputs (except 0).
    """
    return codepat.sub('', str(s) if (s or (s == 0)) else '')
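# A short usage sketch; `codepat` is the module's compiled escape-code pattern,
# so exactly which codes are stripped depends on that regex.
colored = '\x1b[31mhello\x1b[0m world'
print(strip_codes(colored))  # 'hello world' with the ANSI color codes removed
print(strip_codes(None))     # '' for falsey input
print(strip_codes(0))        # '0' (zero is explicitly kept)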
def on_right_align_toggled(self, chk):
    """set the horizontal alignment setting."""
    v = chk.get_active()
    self.settings.general.set_int('window-halignment', 1 if v else 0)
def logout(self, redirect_to='/'):
    '''
    This property will return component which will handle logout requests.

    It only handles POST requests and does not display any rendered content.
    This handler deletes session id from `storage`. If there is no
    session id provided or id is incorrect handler silently redirects to
    login url and does not throw any exception.
    '''
    def _logout(env, data):
        location = redirect_to
        if location is None and env.request.referer:
            location = env.request.referer
        elif location is None:
            location = '/'
        response = HTTPSeeOther(location=str(location))
        self.logout_user(env.request, response)
        return response
    return web.match('/logout', 'logout') | web.method('post') | _logout
def write_k_record(self, *args):
    """
    Write a K record::

        writer.write_k_record_extensions([
            ('FXA', 3), ('SIU', 2), ('ENL', 3),
        ])

        writer.write_k_record(datetime.time(2, 3, 4), ['023', 13, 2])
        # -> J030810FXA1112SIU1315ENL
        # -> K02030402313002

    :param time: UTC time of the k record (default:
        :meth:`~datetime.datetime.utcnow`)
    :param extensions: a list of extension values according to previous
        declaration through
        :meth:`~aerofiles.igc.Writer.write_k_record_extensions`
    """
    num_args = len(args)
    if num_args not in (1, 2):
        raise ValueError('Invalid number of parameters received')

    if num_args == 1:
        extensions = args[0]
        time = None
    else:
        time, extensions = args

    if time is None:
        time = datetime.datetime.utcnow()

    record = self.format_time(time)

    if not (isinstance(extensions, list) and
            isinstance(self.k_record_extensions, list)):
        raise ValueError('Invalid extensions list')

    if len(extensions) != len(self.k_record_extensions):
        raise ValueError('Number of extensions does not match declaration')

    for type_length, value in zip(self.k_record_extensions, extensions):
        length = type_length[1]

        if isinstance(value, (int, float)):
            value = ('%0' + str(length) + 'd') % value

        if len(value) != length:
            raise ValueError('Extension value has wrong length')

        record += value

    self.write_record('K', record)
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points,
                       nodes, problem):
    """Return collocation residuals.

    Parameters
    ----------
    coefs_array : numpy.ndarray
    basis_kwargs : dict
    problem : TwoPointBVPLike

    Returns
    -------
    resids : numpy.ndarray
    """
    coefs_list = self._array_to_list(coefs_array, problem.number_odes)
    derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
    resids = self._assess_approximation(boundary_points, derivs, funcs,
                                        nodes, problem)
    return resids
def remove_this_opinion(self, opinion_id):
    """Removes the opinion for the given opinion identifier

    @type opinion_id: string
    @param opinion_id: the opinion identifier to be removed
    """
    for opi in self.get_opinions():
        if opi.get_id() == opinion_id:
            self.node.remove(opi.get_node())
            break
def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr,
                    dates, assets, columns, **kwargs):
    """Calculates both split adjustments and overwrites for all sids."""
    split_adjusted_cols_for_group = [
        self.name_map[col.name]
        for col in columns
        if self.name_map[col.name] in self._split_adjusted_column_names
    ]
    # Add all splits to the adjustment dict for this sid.
    split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(dates)
    return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
        zero_qtr_data,
        requested_qtr_data,
        last_per_qtr,
        dates,
        assets,
        columns,
        split_adjusted_cols_for_group=split_adjusted_cols_for_group,
        split_adjusted_asof_idx=split_adjusted_asof_idx,
    )
def get_nodesitemtypeinsertion(cls, itemgroup, indent) -> str:
    """Return a string defining the required types for the given
    combination of an exchange item group and |Node| objects.

    >>> from hydpy.auxs.xmltools import XSDWriter
    >>> print(XSDWriter.get_nodesitemtypeinsertion(
    ...     'setitems', 1))    # doctest: +ELLIPSIS
        <complexType name="nodes_setitemsType">
            <sequence>
                <element ref="hpcb:selections"
                         minOccurs="0"/>
                <element ref="hpcb:devices"
                         minOccurs="0"/>
                <element name="sim"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
                <element name="obs"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
                <element name="sim.series"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
                <element name="obs.series"
                         type="hpcb:setitemType"
                         minOccurs="0"
                         maxOccurs="unbounded"/>
            </sequence>
        </complexType>
    <BLANKLINE>
    """
    blanks = ' ' * (indent * 4)
    subs = [
        f'{blanks}<complexType name="nodes_{itemgroup}Type">',
        f'{blanks}    <sequence>',
        f'{blanks}        <element ref="hpcb:selections"',
        f'{blanks}                 minOccurs="0"/>',
        f'{blanks}        <element ref="hpcb:devices"',
        f'{blanks}                 minOccurs="0"/>']
    type_ = 'getitemType' if itemgroup == 'getitems' else 'setitemType'
    for name in ('sim', 'obs', 'sim.series', 'obs.series'):
        subs.extend([
            f'{blanks}        <element name="{name}"',
            f'{blanks}                 type="hpcb:{type_}"',
            f'{blanks}                 minOccurs="0"',
            f'{blanks}                 maxOccurs="unbounded"/>'])
    subs.extend([
        f'{blanks}    </sequence>',
        f'{blanks}</complexType>',
        f''])
    return '\n'.join(subs)
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
                ignored_variables, ignored_interactions,
                ignore_offset):
    """Helper function of sample for scaling"""
    bqm_copy = bqm.copy()
    if scalar is None:
        scalar = _calc_norm_coeff(bqm_copy.linear, bqm_copy.quadratic,
                                  bias_range, quadratic_range,
                                  ignored_variables, ignored_interactions)

    bqm_copy.scale(scalar,
                   ignored_variables=ignored_variables,
                   ignored_interactions=ignored_interactions,
                   ignore_offset=ignore_offset)
    bqm_copy.info.update({'scalar': scalar})
    return bqm_copy
def check_zero_sum_triplets(lst: list):
    """
    check_zero_sum_triplets receives a list of integers.
    It checks if the list contains three unique elements that add up to zero.
    Returns True if such a triplet is found, False otherwise.

    Args:
        lst (list): List of integers

    Returns:
        bool: True if there are three distinct numbers that add up to zero,
        False otherwise.

    Examples:
        >>> check_zero_sum_triplets([1, 3, 5, 0])
        False
        >>> check_zero_sum_triplets([1, 3, -2, 1])
        True
        >>> check_zero_sum_triplets([1, 2, 3, 7])
        False
        >>> check_zero_sum_triplets([2, 4, -5, 3, 9, 7])
        True
        >>> check_zero_sum_triplets([1])
        False
    """
    from itertools import combinations

    # Check all combinations of three distinct numbers
    for triplet in combinations(lst, 3):
        if sum(triplet) == 0:
            return True
    return False
def _Aff4Read(aff4_obj, offset, length):
    """Reads contents of given AFF4 file.

    Args:
      aff4_obj: An AFF4 stream instance to retrieve contents for.
      offset: An offset to start the reading from.
      length: A number of bytes to read. Reads the whole file if 0.

    Returns:
      Contents of specified AFF4 stream.

    Raises:
      TypeError: If `aff4_obj` is not an instance of AFF4 stream.
    """
    length = length or (_Aff4Size(aff4_obj) - offset)
    aff4_obj.Seek(offset)
    return aff4_obj.Read(length)
def GenCatchallState(self):
    """Generate string matching state rules.

    This sets up initial state handlers that cover both the 'INITIAL' state
    and the intermediate content between fields.

    The lexer acts on items with precedence:
      - continuation characters: use the fast forward state rules.
      - field separators: finalize processing the field.
      - quotation characters: use the quotation state rules.
    """
    for c in self.comments:
        self._AddToken(".", c, "PushState,EndField", "COMMENT")
    for c in self.cont:
        self._AddToken(".", c, "PushState", "FWD")
    for t in self.term:
        self._AddToken(".", t, "EndEntry", None)
    for s in self.sep:
        self._AddToken(".", s, "EndField", None)
    for i, q in enumerate(self.quot):
        self._AddToken(".", q, "PushState", "%s_STRING" % i)
    self._AddToken(".", ".", "AddToField", None)
def find_events(symbols, d_data, market_sym='$SPX', trigger=drop_below, trigger_kwargs={}):
    '''Return dataframe of 1's (event happened) and NaNs (no event), 1 column for each symbol'''
    df_close = d_data['actual_close']
    ts_market = df_close[market_sym]

    print "Finding `{0}` events with kwargs={1} for {2} ticker symbols".format(
        trigger.func_name, trigger_kwargs, len(symbols))
    print 'Trigger docstring says:\n\n{0}\n\n'.format(trigger.func_doc)

    # Creating an empty dataframe
    df_events = copy.deepcopy(df_close)
    df_events = df_events * np.NAN

    # Time stamps for the event range
    ldt_timestamps = df_close.index

    for s_sym in symbols:
        if s_sym == market_sym:
            continue
        for i in range(1, len(ldt_timestamps)):
            # Calculating the returns for this timestamp
            kwargs = dict(trigger_kwargs)
            kwargs['price_today'] = df_close[s_sym].ix[ldt_timestamps[i]]
            kwargs['price_yest'] = df_close[s_sym].ix[ldt_timestamps[i - 1]]
            kwargs['return_today'] = (kwargs['price_today'] / (kwargs['price_yest'] or 1.)) - 1
            kwargs['market_price_today'] = ts_market.ix[ldt_timestamps[i]]
            kwargs['market_price_yest'] = ts_market.ix[ldt_timestamps[i - 1]]
            kwargs['market_return_today'] = (kwargs['market_price_today'] / (kwargs['market_price_yest'] or 1.)) - 1

            if trigger(**kwargs):
                df_events[s_sym].ix[ldt_timestamps[i]] = 1

    print 'Found {0} events where price dropped below {1}.'.format(
        df_events.sum(axis=1).sum(axis=0), trigger_kwargs['threshold'])
    return df_events
def log(self):
    '''
    :returns: The last 10 commit entries as dictionary

              * 'commit': The commit-ID
              * 'message': First line of the commit message
    '''
    log = self._log(num=10, format='%h::%b').get('stdout')
    if log:
        return [dict(commit=c, message=m)
                for c, m in [l.split('::') for l in log]]
def load_config(filename):
    """load_config

    This function will take in a file location on the file system, extract
    the JSON content found within, and return a python-interpreted form of
    the object.
    """
    config = None
    if os.path.isfile(filename) and os.access(filename, os.R_OK):
        with open(filename, 'r') as fh:
            config = json.loads(fh.read())
    return config
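# A quick usage sketch using only the standard library, mirroring the function:
import json
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "settings.json")
with open(path, "w") as fh:
    json.dump({"debug": True, "retries": 3}, fh)

print(load_config(path)["retries"])        # 3
print(load_config("/no/such/file.json"))   # None when the file is missing or unreadable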
def call(self, proc_name, field_types, *args):
    """call server procedure"""
    d = self.replyQueue.get()
    packet = RequestCall(self.charset, self.errors, d._ipro_request_id,
                         proc_name, 0, *args)
    self.transport.write(bytes(packet))
    return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
async def packets_from_tshark(self, packet_callback, packet_count=None, close_tshark=True):
    """
    A coroutine which creates a tshark process, runs the given callback on
    each packet that is received from it and closes the process when it is
    done.

    Do not use interactively. Can be used in order to insert packets into
    your own eventloop.
    """
    tshark_process = await self._get_tshark_process(packet_count=packet_count)
    try:
        await self._go_through_packets_from_fd(
            tshark_process.stdout, packet_callback, packet_count=packet_count)
    except StopCapture:
        pass
    finally:
        if close_tshark:
            await self._close_async()
def list_modules(root_package='vlcp'):
    '''
    Walk through all the sub modules, find subclasses of
    vlcp.server.module.Module, list their apis through apidefs
    '''
    pkg = __import__(root_package, fromlist=['_'])
    module_dict = OrderedDict()
    _server = Server()
    for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
        m = __import__(module, fromlist=['_'])
        for name, v in vars(m).items():
            if v is not None and isinstance(v, type) and issubclass(v, Module) \
                    and v is not Module and not isinstance(v, _ProxyModule) \
                    and hasattr(v, '__dict__') and 'configkey' in v.__dict__ \
                    and v.__module__ == module:
                module_name = v.__name__.lower()
                if module_name not in module_dict:
                    _inst = v(_server)
                    module_info = OrderedDict((
                        ('class', v.__module__ + '.' + v.__name__),
                        ('dependencies', [d.__name__.lower() for d in v.depends]),
                        ('classdescription', getdoc(v)),
                        ('apis', [])))
                    if hasattr(_inst, 'apiHandler'):
                        apidefs = _inst.apiHandler.apidefs
                        module_info['apis'] = [
                            (d[0], d[3]) for d in apidefs
                            if len(d) > 3 and not d[0].startswith('public/')]
                    module_dict[module_name] = module_info
    return module_dict
def models(cls, api_version=DEFAULT_API_VERSION):
    """Module depends on the API version:

    * 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.network.v2015_06_15.models>`
    * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.network.v2016_09_01.models>`
    * 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.network.v2016_12_01.models>`
    * 2017-03-01: :mod:`v2017_03_01.models<azure.mgmt.network.v2017_03_01.models>`
    * 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.network.v2017_06_01.models>`
    * 2017-08-01: :mod:`v2017_08_01.models<azure.mgmt.network.v2017_08_01.models>`
    * 2017-09-01: :mod:`v2017_09_01.models<azure.mgmt.network.v2017_09_01.models>`
    * 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.network.v2017_10_01.models>`
    * 2017-11-01: :mod:`v2017_11_01.models<azure.mgmt.network.v2017_11_01.models>`
    * 2018-01-01: :mod:`v2018_01_01.models<azure.mgmt.network.v2018_01_01.models>`
    * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.network.v2018_02_01.models>`
    * 2018-04-01: :mod:`v2018_04_01.models<azure.mgmt.network.v2018_04_01.models>`
    """
    if api_version == '2015-06-15':
        from .v2015_06_15 import models
        return models
    elif api_version == '2016-09-01':
        from .v2016_09_01 import models
        return models
    elif api_version == '2016-12-01':
        from .v2016_12_01 import models
        return models
    elif api_version == '2017-03-01':
        from .v2017_03_01 import models
        return models
    elif api_version == '2017-06-01':
        from .v2017_06_01 import models
        return models
    elif api_version == '2017-08-01':
        from .v2017_08_01 import models
        return models
    elif api_version == '2017-09-01':
        from .v2017_09_01 import models
        return models
    elif api_version == '2017-10-01':
        from .v2017_10_01 import models
        return models
    elif api_version == '2017-11-01':
        from .v2017_11_01 import models
        return models
    elif api_version == '2018-01-01':
        from .v2018_01_01 import models
        return models
    elif api_version == '2018-02-01':
        from .v2018_02_01 import models
        return models
    elif api_version == '2018-04-01':
        from .v2018_04_01 import models
        return models
    raise NotImplementedError("APIVersion {} is not available".format(api_version))
def store_blocks(self, el, blocks, text, force_root):
    """Store the text as desired."""
    self.soft_break(el, text)

    if force_root or el.parent is None or self.content_break(el):
        content = html.unescape(''.join(text))
        if content:
            blocks.append((content, self.additional_context + self.construct_selector(el)))
        text = []

    return text
def scheduled(self, offset=0, count=25):
    '''Return all the currently-scheduled jobs'''
    return self.client('jobs', 'scheduled', self.name, offset, count)
def update_common(obj, report):
    """do updated_at checks"""
    # updated checks
    if obj['updated_at'] >= yesterday:
        report['_updated_today_count'] += 1
    if obj['updated_at'] >= last_month:
        report['_updated_this_month_count'] += 1
    if obj['updated_at'] >= last_year:
        report['_updated_this_year_count'] += 1
def delete_saved_aggregation_by_slug(request, slug):
    """Delete Saved Aggregation By Slug"""
    ss = get_object_or_404(Aggregation, slug=slug)
    ss.delete()
    messages.success(request, _("Saved aggregation deleted."))
    return HttpResponseRedirect(reverse('djmongo_browse_saved_aggregations_w_params',
                                        args=(ss.database_name, ss.collection_name)))
def check_git_version():
    """Check the installed git version against a known-stable version.

    If the git version is less than ``MIN_GIT_VERSION``, a warning is raised.
    If git is not installed at all on this system, we also raise a warning
    for that.

    The original reason why this check was introduced is because with older
    versions of git (< 1.9), newly init-ed git repos cannot checkout from a
    fetched remote unless the repo has at least one commit in it. The reason
    for this is that before creating a commit, the HEAD refers to a
    refs/heads/master file which doesn't exist yet.

    .. todo::

        TODO(larsbutler): If we wanted to be defensive about this and favor
        compatibility over elegance, we could just automatically add a
        `git commit` (empty, no message) after every `git init`. I would
        recommend doing this in the :class:`GitRepo` class, not in the
        module-level util functions. Adding an extra commit shouldn't cause
        any problems.
    """
    try:
        version = git_version()
    except exceptions.SimplGitCommandError:
        warnings.warn("Git does not appear to be installed!",
                      exceptions.GitWarning)
        return
    ver_num = version.split()[2]
    major, minor, _ = ver_num.split('.', 2)
    major = int(major)
    minor = int(minor)
    if (major, minor) < MIN_GIT_VERSION:
        warnings.warn(
            "Git version %(ver)s found. %(rec)s or greater "
            "is recommended for simpl/git.py"
            % dict(ver=ver_num,
                   rec='.'.join((str(x) for x in MIN_GIT_VERSION))),
            exceptions.GitWarning)
def process_large_file(self, local_file, parent):
    """Upload a single file using multiple processes to upload multiple
    chunks at the same time. Updates local_file with its remote_id when done.

    :param local_file: LocalFile: file we are uploading
    :param parent: LocalFolder/LocalProject: parent of the file
    """
    file_content_sender = FileUploader(self.settings.config,
                                       self.settings.data_service,
                                       local_file,
                                       self.settings.watcher,
                                       self.settings.file_upload_post_processor)
    remote_id = file_content_sender.upload(self.settings.project_id,
                                           parent.kind,
                                           parent.remote_id)
    local_file.set_remote_id_after_send(remote_id)
def base64ToImage(imgData, out_path, out_file):
    """converts a base64 string to a file"""
    fh = open(os.path.join(out_path, out_file), "wb")
    fh.write(imgData.decode('base64'))
    fh.close()
    del fh
    return os.path.join(out_path, out_file)
def is_validated(self):
    """Returns True if this instance is validated.

    Note that resolving this property requires a DB query, so if you've a
    very large amount of receipts you should prefetch (see django's
    ``select_related``) the ``validation`` field. Even so, a DB query *may*
    be triggered.

    If you need a large list of validated receipts, you should actually
    filter them via a QuerySet::

        Receipt.objects.filter(validation__result=RESULT_APPROVED)

    :rtype: bool
    """
    # Avoid the DB lookup if possible:
    if not self.receipt_number:
        return False

    try:
        return self.validation.result == ReceiptValidation.RESULT_APPROVED
    except ReceiptValidation.DoesNotExist:
        return False
def cfg(self):
    """Load the application configuration.

    This method loads configuration from python module.
    """
    config = LStruct(self.defaults)
    module = config['CONFIG'] = os.environ.get(
        CONFIGURATION_ENVIRON_VARIABLE, config['CONFIG'])

    if module:
        try:
            module = import_module(module)
            config.update({
                name: getattr(module, name) for name in dir(module)
                if name == name.upper() and not name.startswith('_')
            })
        except ImportError as exc:
            config.CONFIG = None
            self.logger.error("Error importing %s: %s", module, exc)

    # Patch configuration from ENV
    for name in config:
        if name.startswith('_') or name != name.upper() or name not in os.environ:
            continue
        try:
            config[name] = json.loads(os.environ[name])
        except ValueError:
            pass

    return config
def replace(name,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            show_changes=True,
            ignore_if_missing=False,
            backslash_literal=False):
    r'''
    Maintain an edit in a file.

    .. versionadded:: 0.17.0

    name
        Filesystem path to the file to be edited. If a symlink is specified,
        it will be resolved to its target.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`re.search`.

        .. note::

            If you need to match a literal string that contains regex special
            characters, you may want to use salt's custom Jinja filter,
            ``regex_escape``.

            .. code-block:: jinja

                {{ 'http://example.com?foo=bar%20baz' | regex_escape }}

    repl
        The replacement text

    count
        Maximum number of pattern occurrences to be replaced. Defaults to 0.
        If count is a positive integer n, no more than n occurrences will be
        replaced, otherwise all occurrences will be replaced.

    flags
        A list of flags defined in the ``re`` module documentation from the
        Python standard library. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults
        to ``8`` (which equates to ``['MULTILINE']``).

        .. note::

            ``file.replace`` reads the entire file as a string to support
            multiline regex patterns. Therefore, when using anchors such as
            ``^`` or ``$`` in the pattern, those anchors may be relative to
            the line OR relative to the file. The default for
            ``file.replace`` is to treat anchors as relative to the line,
            which is implemented by setting the default value of ``flags``
            to ``['MULTILINE']``. When overriding the default value for
            ``flags``, if ``'MULTILINE'`` is not present then anchors will be
            relative to the file. If the desired behavior is for anchors to
            be relative to the line, then simply add ``'MULTILINE'`` to the
            list of flags.

    bufsize
        How much of the file to buffer into memory at once. The default
        value ``1`` processes one line at a time. The special value ``file``
        may be specified which will read the entire file into memory before
        processing.

    append_if_not_found : False
        If set to ``True``, and pattern is not found, then the content will
        be appended to the file.

        .. versionadded:: 2014.7.0

    prepend_if_not_found : False
        If set to ``True`` and pattern is not found, then the content will
        be prepended to the file.

        .. versionadded:: 2014.7.0

    not_found_content
        Content to use for append/prepend if not found. If ``None``
        (default), uses ``repl``. Useful when ``repl`` uses references to
        group in pattern.

        .. versionadded:: 2014.7.0

    backup
        The file extension to use for a backup of the file before editing.
        Set to ``False`` to skip making a backup.

    show_changes : True
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made. Returns a boolean or a
        string.

        .. note:
            Using this option will store two copies of the file in memory
            (the original version and the edited version) in order to
            generate the diff. This may not normally be a concern, but could
            impact performance if used with large files.

    ignore_if_missing : False
        .. versionadded:: 2016.3.4

        Controls what to do if the file is missing. If set to ``False``, the
        state will display an error raised by the execution module. If set
        to ``True``, the state will simply report no changes.

    backslash_literal : False
        .. versionadded:: 2016.11.7

        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run
        of the state.

    For complex regex patterns, it can be useful to avoid the need for
    complex quoting and escape sequences by making use of YAML's multiline
    string syntax.

    .. code-block:: yaml

        complex_search_and_replace:
          file.replace:
            # <...snip...>
            - pattern: |
                CentOS \(2.6.32[^\\n]+\\n\s+root[^\\n]+\\n\)+

    .. note::

       When using YAML multiline string syntax in ``pattern:``, make sure to
       also use that syntax in the ``repl:`` part, or you might lose line
       feeds.

    When regex capture groups are used in ``pattern:``, their captured value
    is available for reuse in the ``repl:`` part as a backreference
    (ex. ``\1``).

    .. code-block:: yaml

        add_login_group_to_winbind_ssh_access_list:
          file.replace:
            - name: '/etc/security/pam_winbind.conf'
            - pattern: '^(require_membership_of=)(.*)$'
            - repl: '\1\2,append-new-group-to-line'

    .. note::

        The ``file.replace`` state uses Python's ``re`` module.
        For more advanced options, see
        https://docs.python.org/2/library/re.html
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.replace')

    check_res, check_msg = _check_file(name)
    if not check_res:
        if ignore_if_missing and 'file not found' in check_msg:
            ret['comment'] = 'No changes needed to be made'
            return ret
        else:
            return _error(ret, check_msg)

    changes = __salt__['file.replace'](name,
                                       pattern,
                                       repl,
                                       count=count,
                                       flags=flags,
                                       bufsize=bufsize,
                                       append_if_not_found=append_if_not_found,
                                       prepend_if_not_found=prepend_if_not_found,
                                       not_found_content=not_found_content,
                                       backup=backup,
                                       dry_run=__opts__['test'],
                                       show_changes=show_changes,
                                       ignore_if_missing=ignore_if_missing,
                                       backslash_literal=backslash_literal)

    if changes:
        ret['changes']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would have been made'
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'

    return ret
def delete(table, session, conds):
    """Performs a hard delete on a row, which means the row is deleted from
    the Savage table as well as the archive table.

    :param table: the model class which inherits from
        :class:`~savage.models.user_table.SavageModelMixin` and specifies the
        model of the user table from which we are querying
    :param session: a sqlalchemy session with connections to the database
    :param conds: a list of dictionary of key value pairs where keys are
        columns in the table and values are values the column should take on.
        If specified, this query will only return rows where the columns meet
        all the conditions. The columns specified in this dictionary must be
        exactly the unique columns that versioning pivots around.
    """
    with session.begin_nested():
        archive_conds_list = _get_conditions_list(table, conds)
        session.execute(sa.delete(
            table.ArchiveTable,
            whereclause=_get_conditions(archive_conds_list)))

        conds_list = _get_conditions_list(table, conds, archive=False)
        session.execute(sa.delete(table, whereclause=_get_conditions(conds_list)))
def generate(self, references, buffers):
    '''Create a JSON representation of this event suitable for sending
    to clients.

    .. code-block:: python

        {
            'kind'          : 'ColumnDataChanged',
            'column_source' : <reference to a CDS>,
            'new'           : <new data to stream to column_source>,
            'cols'          : <specific columns to update>
        }

    Args:
        references (dict[str, Model]) :
            If the event requires references to certain models in order to
            function, they may be collected here.

            **This is an "out" parameter**. The values it contains will be
            modified in-place.

        buffers (set) :
            If the event needs to supply any additional Bokeh protocol
            buffers, they may be added to this set. **This is an "out"
            parameter**. The values it contains will be modified in-place.
    '''
    from ..util.serialization import transform_column_source_data
    data_dict = transform_column_source_data(self.column_source.data,
                                             buffers=buffers,
                                             cols=self.cols)
    return {'kind': 'ColumnDataChanged',
            'column_source': self.column_source.ref,
            'new': data_dict,
            'cols': self.cols}
def _set_spyder_breakpoints ( self , breakpoints ) : """Set all Spyder breakpoints in an active pdb session"""
if not self . _pdb_obj : return # Breakpoints come serialized from Spyder . We send them # in a list of one element to be able to send them at all # in Python 2 serialized_breakpoints = breakpoints [ 0 ] breakpoints = pickle . loads ( serialized_breakpoints ) self . _pdb_obj . set_spyder_breakpoints ( breakpoints )
def build_stylemap_names ( family_name , style_name , is_bold = False , is_italic = False , linked_style = None ) : """Build UFO ` styleMapFamilyName ` and ` styleMapStyleName ` based on the family and style names , and the entries in the " Style Linking " section of the " Instances " tab in the " Font Info " . The value of ` styleMapStyleName ` can be either " regular " , " bold " , " italic " or " bold italic " , depending on the values of ` is _ bold ` and ` is _ italic ` . The ` styleMapFamilyName ` is a combination of the ` family _ name ` and the ` linked _ style ` . If ` linked _ style ` is unset or set to ' Regular ' , the linked style is equal to the style _ name with the last occurrences of the strings ' Regular ' , ' Bold ' and ' Italic ' stripped from it ."""
styleMapStyleName = ( " " . join ( s for s in ( "bold" if is_bold else "" , "italic" if is_italic else "" ) if s ) or "regular" ) if not linked_style or linked_style == "Regular" : linked_style = _get_linked_style ( style_name , is_bold , is_italic ) if linked_style : styleMapFamilyName = ( family_name or "" ) + " " + linked_style else : styleMapFamilyName = family_name return styleMapFamilyName , styleMapStyleName
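A small illustration of the mapping; the font names are made up, and the expected result relies on the docstring's statement that the linked style is derived by stripping the last 'Regular'/'Bold'/'Italic' occurrences from the style name.

fam, sty = build_stylemap_names(
    family_name="Example Sans",
    style_name="Bold Italic",
    is_bold=True,
    is_italic=True,
)
# sty == "bold italic"
# fam == "Example Sans" (stripping "Bold Italic" leaves no linked style to append)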
def flip_uuid_parts ( uuid ) : """Flips high and low segments of the timestamp portion of a UUID string . This enables correct lexicographic sorting . Because it is a simple flip , this function works in both directions ."""
flipped_uuid = uuid . split ( '-' ) flipped_uuid [ 0 ] , flipped_uuid [ 2 ] = flipped_uuid [ 2 ] , flipped_uuid [ 0 ] flipped_uuid = '-' . join ( flipped_uuid ) return flipped_uuid
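A quick sketch of the swap of the first and third dash-separated segments; the UUID value is made up.

original = "6ccd780c-baba-1026-9564-5b8c656024db"
flipped = flip_uuid_parts(original)
# flipped == "1026-baba-6ccd780c-9564-5b8c656024db"
assert flip_uuid_parts(flipped) == original  # the flip is its own inverse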
def patch_namespaced_replication_controller ( self , name , namespace , body , ** kwargs ) : # noqa : E501 """patch _ namespaced _ replication _ controller # noqa : E501 partially update the specified ReplicationController # noqa : E501 This method makes a synchronous HTTP request by default . To make an asynchronous HTTP request , please pass async _ req = True > > > thread = api . patch _ namespaced _ replication _ controller ( name , namespace , body , async _ req = True ) > > > result = thread . get ( ) : param async _ req bool : param str name : name of the ReplicationController ( required ) : param str namespace : object name and auth scope , such as for teams and projects ( required ) : param UNKNOWN _ BASE _ TYPE body : ( required ) : param str pretty : If ' true ' , then the output is pretty printed . : param str dry _ run : When present , indicates that modifications should not be persisted . An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request . Valid values are : - All : all dry run stages will be processed : return : V1ReplicationController If the method is called asynchronously , returns the request thread ."""
kwargs [ '_return_http_data_only' ] = True if kwargs . get ( 'async_req' ) : return self . patch_namespaced_replication_controller_with_http_info ( name , namespace , body , ** kwargs ) # noqa : E501 else : ( data ) = self . patch_namespaced_replication_controller_with_http_info ( name , namespace , body , ** kwargs ) # noqa : E501 return data
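A hedged example of calling this endpoint through the official kubernetes Python client; the controller name, namespace and replica count are assumptions.

from kubernetes import client, config

config.load_kube_config()
api = client.CoreV1Api()
body = {"spec": {"replicas": 3}}  # strategic-merge patch body
rc = api.patch_namespaced_replication_controller("my-rc", "default", body)
print(rc.spec.replicas)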
def _highlight_path ( self , hlpath ) : """Highlight an entry in the table and associated marking ."""
self . logger . debug ( 'Highlighting {0}' . format ( hlpath ) ) self . treeview . select_path ( hlpath ) # TODO : Does not work in Qt . This is a known issue in Ginga . self . treeview . scroll_to_path ( hlpath )
def vel_grad_avg ( self ) : """Calculate the average velocity gradient ( G - bar ) of water flowing through the flocculator . : returns : Average velocity gradient ( G - bar ) : rtype : float * 1 / second"""
return ( ( u . standard_gravity * self . HL ) / ( pc . viscosity_kinematic ( self . temp ) * self . Gt ) ) . to ( u . s ** - 1 )
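One way to arrive at the returned expression, assuming the usual definitions of the mean velocity gradient and the collision potential Gt = G-bar * theta, where theta is the hydraulic residence time:

\bar{G} = \sqrt{\frac{g\,h_L}{\nu\,\theta}}, \qquad \theta = \frac{Gt}{\bar{G}}
\;\Longrightarrow\; \bar{G}^2 = \frac{g\,h_L\,\bar{G}}{\nu\,Gt}
\;\Longrightarrow\; \bar{G} = \frac{g\,h_L}{\nu\,Gt}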
def do_execute ( self ) : """The actual execution of the actor . : return : None if successful , otherwise error message : rtype : str"""
fname = str ( self . input . payload ) spattern = str ( self . resolve_option ( "regexp" ) ) pattern = None if ( spattern is not None ) and ( spattern != ".*" ) : pattern = re . compile ( spattern ) if ( pattern is None ) or ( pattern . match ( fname ) ) : os . remove ( fname ) self . _output . append ( self . input ) return None
def getWifiInfo ( self , wifiInterfaceId = 1 , timeout = 1 ) : """Execute the GetInfo action to get basic Wifi information . : param int wifiInterfaceId : the id of the Wifi interface : param float timeout : the timeout to wait for the action to be executed : return : the basic information : rtype : WifiBasicInfo"""
namespace = Wifi . getServiceType ( "getWifiInfo" ) + str ( wifiInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetInfo" , timeout = timeout ) return WifiBasicInfo ( results )
def sync ( func ) : """Decorator to make a task synchronous ."""
sync_timeout = 3600 # Match standard synchronous timeout . def wraps ( * args , ** kwargs ) : task = func ( * args , ** kwargs ) task . wait_for_result ( timeout = sync_timeout ) result = json . loads ( task . result ) return result return wraps
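A minimal sketch of how the decorator might be applied; start_export_task and its task object are hypothetical stand-ins for whatever asynchronous task API the decorated function wraps.

@sync
def export_data(dataset_id):
    # must return a task exposing wait_for_result(timeout=...) and a JSON .result
    return start_export_task(dataset_id)

result = export_data("abc123")  # blocks until the task finishes, returns parsed JSON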
def show_report ( self , session , * levels ) : """Shows the report that has been generated"""
if levels : self . make_report ( session , * levels ) if self . __report : session . write_line ( self . to_json ( self . __report ) ) else : session . write_line ( "No report to show" )
def warning ( self , text ) : """Posts a warning message adding a timestamp and logging level to it for both file and console handlers . Logger uses a redraw rate because of console flickering . That means it will not draw new messages or progress at the very time they are being logged but their timestamp will be captured at the right time . Logger will redraw at a given time period AND when new messages or progress are logged . If you still want to force redraw immediately ( may produce flickering ) then call ' flush ' method . : param text : The text to log into file and console ."""
self . queue . put ( dill . dumps ( LogMessageCommand ( text = text , level = logging . WARNING ) ) )
def get_plugins ( sites = None ) : """Returns all GoScale plugins . It ignores all other django - cms plugins"""
plugins = [ ] # collect GoScale plugins for plugin in CMSPlugin . objects . all ( ) : if plugin : cl = plugin . get_plugin_class ( ) . model if 'posts' in cl . _meta . get_all_field_names ( ) : instance = plugin . get_plugin_instance ( ) [ 0 ] plugins . append ( instance ) # Filter by sites if sites and len ( sites ) > 0 : onsite = [ ] for plugin in plugins : try : if plugin . page . site in sites : onsite . append ( plugin ) except AttributeError : continue return onsite return plugins
def loss ( logits , labels , batch_size = None ) : """Adds all losses for the model . Note the final loss is not returned . Instead , the list of losses is collected by slim . losses . The losses are accumulated in tower _ loss ( ) and summed to calculate the total loss . Args : logits : List of logits from inference ( ) . Each entry is a 2 - D float Tensor . labels : Labels from distorted _ inputs or inputs ( ) . 1 - D tensor of shape [ batch _ size ] batch _ size : integer"""
if not batch_size : batch_size = FLAGS . batch_size # Reshape the labels into a dense Tensor of # shape [ FLAGS . batch _ size , num _ classes ] . sparse_labels = tf . reshape ( labels , [ batch_size , 1 ] ) indices = tf . reshape ( tf . range ( batch_size ) , [ batch_size , 1 ] ) concated = tf . concat ( axis = 1 , values = [ indices , sparse_labels ] ) num_classes = logits [ 0 ] . get_shape ( ) [ - 1 ] . value dense_labels = tf . sparse_to_dense ( concated , [ batch_size , num_classes ] , 1.0 , 0.0 ) # Cross entropy loss for the main softmax prediction . slim . losses . cross_entropy_loss ( logits [ 0 ] , dense_labels , label_smoothing = 0.1 , weight = 1.0 ) # Cross entropy loss for the auxiliary softmax head . slim . losses . cross_entropy_loss ( logits [ 1 ] , dense_labels , label_smoothing = 0.1 , weight = 0.4 , scope = 'aux_loss' )
def write_blacklist ( self ) : """Write the blacklist ."""
oldmask = os . umask ( 0077 ) for key , value in self . blacklist . items ( ) : self . write ( u"%d %s%s" % ( value , repr ( key ) , os . linesep ) ) self . close_fileoutput ( ) # restore umask os . umask ( oldmask )
def __create_index ( self , keys , index_options , session , ** kwargs ) : """Internal create index helper . : Parameters : - ` keys ` : a list of tuples [ ( key , type ) , ( key , type ) , . . . ] - ` index _ options ` : a dict of index options . - ` session ` ( optional ) : a : class : ` ~ pymongo . client _ session . ClientSession ` ."""
index_doc = helpers . _index_document ( keys ) index = { "key" : index_doc } collation = validate_collation_or_none ( index_options . pop ( 'collation' , None ) ) index . update ( index_options ) with self . _socket_for_writes ( session ) as sock_info : if collation is not None : if sock_info . max_wire_version < 5 : raise ConfigurationError ( 'Must be connected to MongoDB 3.4+ to use collations.' ) else : index [ 'collation' ] = collation cmd = SON ( [ ( 'createIndexes' , self . name ) , ( 'indexes' , [ index ] ) ] ) cmd . update ( kwargs ) self . _command ( sock_info , cmd , read_preference = ReadPreference . PRIMARY , codec_options = _UNICODE_REPLACE_CODEC_OPTIONS , write_concern = self . _write_concern_for ( session ) , session = session )
def sizeof_fmt ( num , suffix = 'B' ) : """Adapted from https : / / stackoverflow . com / a / 1094933 Re : precision - display enough decimals to show progress on a slow ( < 5 MB / s ) Internet connection"""
precision = { '' : 0 , 'Ki' : 0 , 'Mi' : 0 , 'Gi' : 3 , 'Ti' : 6 , 'Pi' : 9 , 'Ei' : 12 , 'Zi' : 15 } for unit in [ '' , 'Ki' , 'Mi' , 'Gi' , 'Ti' , 'Pi' , 'Ei' , 'Zi' ] : if abs ( num ) < 1024.0 : format_string = "{number:.%df} {unit}{suffix}" % precision [ unit ] return format_string . format ( number = num , unit = unit , suffix = suffix ) num /= 1024.0 return "%.18f %s%s" % ( num , 'Yi' , suffix )
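Two illustrative calls showing the per-unit precision table in action:

print(sizeof_fmt(4096))         # "4 KiB"     (0 decimals up to the MiB level)
print(sizeof_fmt(3500000000))   # "3.260 GiB" (3 decimals at the GiB level)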
def kscale ( matrix , k = 7 , dists = None ) : """Returns the local scale based on the k - th nearest neighbour"""
dists = ( kdists ( matrix , k = k ) if dists is None else dists ) scale = dists . dot ( dists . T ) return scale
def plot ( self ) : """Plot the empirical histogram versus best - fit distribution ' s PDF ."""
plt . plot ( self . bin_edges , self . hist , self . bin_edges , self . best_pdf )
def configure_parser ( self , parser ) : """Adds the necessary supported arguments to the argument parser . Args : parser ( argparse . ArgumentParser ) : The parser to add arguments to ."""
parser . add_argument ( "--log_path" , default = "" , help = "The log file path" ) parser . add_argument ( "--verbose" , help = "Increase logging verbosity" , action = "store_true" )
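A hedged wiring example; some_command stands in for an instance of the class that owns configure_parser.

import argparse

parser = argparse.ArgumentParser()
some_command.configure_parser(parser)
args = parser.parse_args(["--verbose", "--log_path", "run.log"])
# args.verbose is True, args.log_path == "run.log"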
def _get_asym_alpha_tag ( self , a , b ) : """Find asymmetry from cryo oven with alpha detectors . a : list of alpha detector histograms ( each helicity ) b : list of beta detector histograms ( each helicity ) 1 + 1 - 2 + 2-"""
# beta in coincidence with alpha coin = a [ : 4 ] # beta coincidence with no alpha no_coin = a [ 4 : 8 ] # get split helicity asym from hel_coin = self . _get_asym_hel ( coin ) hel_no_coin = self . _get_asym_hel ( no_coin ) hel_reg = self . _get_asym_hel ( b ) # get combined helicities com_coin = self . _get_asym_comb ( coin ) com_no_coin = self . _get_asym_comb ( no_coin ) com_reg = self . _get_asym_comb ( b ) # make output return ( hel_coin , hel_no_coin , hel_reg , com_coin , com_no_coin , com_reg )
def choice_default_add_related_pks ( self , obj ) : """Add related primary keys to a Choice instance ."""
if not hasattr ( obj , '_voter_pks' ) : obj . _voter_pks = obj . voters . values_list ( 'pk' , flat = True )
def evaluate ( self , test_events ) : """Iterate the recommend / update procedure and compute incremental recall . Args : test _ events ( list of Event ) : Positive test events . Returns : list of tuples : ( top - 1 score , rank , recommend time , update time )"""
for i , e in enumerate ( test_events ) : self . __validate ( e ) # target items ( all or unobserved depending on a dataset ) unobserved = set ( self . item_buffer ) if not self . repeat : unobserved -= self . rec . users [ e . user . index ] [ 'known_items' ] # item i interacted with by user u must be in the recommendation candidates # even if it is a new item unobserved . add ( e . item . index ) candidates = np . asarray ( list ( unobserved ) ) # make top - { at } recommendation for the 1001 items start = time . clock ( ) recos , scores = self . __recommend ( e , candidates ) recommend_time = ( time . clock ( ) - start ) rank = np . where ( recos == e . item . index ) [ 0 ] [ 0 ] # Step 2 : update the model with the observed event self . rec . users [ e . user . index ] [ 'known_items' ] . add ( e . item . index ) start = time . clock ( ) self . rec . update ( e ) update_time = ( time . clock ( ) - start ) self . item_buffer . append ( e . item . index ) # ( top - 1 score , where the correct item is ranked , rec time , update time ) yield scores [ 0 ] , rank , recommend_time , update_time
def _render_content ( self , content , ** settings ) : """Perform widget rendering , but do not print anything ."""
result = [ ] columns = settings [ self . SETTING_COLUMNS ] # Format each table cell into string . ( columns , content ) = self . table_format ( columns , content ) # Enumerate each table row . if settings [ self . SETTING_FLAG_ENUMERATE ] : ( columns , content ) = self . table_enumerate ( columns , content ) # Calculate the dimensions of each table column . dimensions = self . table_measure ( columns , content ) # Display table header . sb = { k : settings [ k ] for k in ( self . SETTING_BORDER_STYLE , self . SETTING_BORDER_FORMATING ) } result . append ( self . fmt_border ( dimensions , 't' , ** sb ) ) if settings [ self . SETTING_FLAG_HEADER ] : s = { k : settings [ k ] for k in ( self . SETTING_FLAG_PLAIN , self . SETTING_BORDER_STYLE , self . SETTING_BORDER_FORMATING ) } s [ self . SETTING_TEXT_FORMATING ] = settings [ self . SETTING_HEADER_FORMATING ] result . append ( self . fmt_row_header ( columns , dimensions , ** s ) ) result . append ( self . fmt_border ( dimensions , 'm' , ** sb ) ) # Display table body . for row in content : s = { k : settings [ k ] for k in ( self . SETTING_FLAG_PLAIN , self . SETTING_BORDER_STYLE , self . SETTING_BORDER_FORMATING ) } s [ self . SETTING_TEXT_FORMATING ] = settings [ self . SETTING_TEXT_FORMATING ] result . append ( self . fmt_row ( columns , dimensions , row , ** s ) ) # Display table footer result . append ( self . fmt_border ( dimensions , 'b' , ** sb ) ) return result
def version ( serial = None ) : """Returns version information for MicroPython running on the connected device . If such information is not available or the device is not running MicroPython , raise a ValueError . If any other exception is thrown , the device was running MicroPython but there was a problem parsing the output ."""
try : out , err = execute ( [ 'import os' , 'print(os.uname())' , ] , serial ) if err : raise ValueError ( clean_error ( err ) ) except ValueError : # Re - raise any errors from stderr raised in the try block . raise except Exception : # Raise a value error to indicate unable to find something on the # microbit that will return parseable information about the version . # It doesn ' t matter what the error is , we just need to indicate a # failure with the expected ValueError exception . raise ValueError ( ) raw = out . decode ( 'utf-8' ) . strip ( ) raw = raw [ 1 : - 1 ] items = raw . split ( ', ' ) result = { } for item in items : key , value = item . split ( '=' ) result [ key ] = value [ 1 : - 1 ] return result
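The parsed result is simply os.uname() on the board split into a dict; the serial handle and the field values below are illustrative only.

info = version(serial=open_serial_connection)  # hypothetical serial handle
# info might look like:
# {'sysname': 'microbit', 'nodename': 'microbit', 'release': '1.0',
#  'version': '...', 'machine': 'micro:bit with nRF51822'}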
def _from_dict ( cls , _dict ) : """Initialize a FeedbackDataInput object from a json dictionary ."""
args = { } if 'feedback_type' in _dict : args [ 'feedback_type' ] = _dict . get ( 'feedback_type' ) else : raise ValueError ( 'Required property \'feedback_type\' not present in FeedbackDataInput JSON' ) if 'document' in _dict : args [ 'document' ] = ShortDoc . _from_dict ( _dict . get ( 'document' ) ) if 'model_id' in _dict : args [ 'model_id' ] = _dict . get ( 'model_id' ) if 'model_version' in _dict : args [ 'model_version' ] = _dict . get ( 'model_version' ) if 'location' in _dict : args [ 'location' ] = Location . _from_dict ( _dict . get ( 'location' ) ) else : raise ValueError ( 'Required property \'location\' not present in FeedbackDataInput JSON' ) if 'text' in _dict : args [ 'text' ] = _dict . get ( 'text' ) else : raise ValueError ( 'Required property \'text\' not present in FeedbackDataInput JSON' ) if 'original_labels' in _dict : args [ 'original_labels' ] = OriginalLabelsIn . _from_dict ( _dict . get ( 'original_labels' ) ) else : raise ValueError ( 'Required property \'original_labels\' not present in FeedbackDataInput JSON' ) if 'updated_labels' in _dict : args [ 'updated_labels' ] = UpdatedLabelsIn . _from_dict ( _dict . get ( 'updated_labels' ) ) else : raise ValueError ( 'Required property \'updated_labels\' not present in FeedbackDataInput JSON' ) return cls ( ** args )
def prepread ( sheet , header = True , startcell = None , stopcell = None ) : """Return four StartStop objects , defining the outer bounds of header row and data range , respectively . If header is False , the first two items will be None . - - > [ headstart , headstop , datstart , datstop ] sheet : xlrd . sheet . Sheet instance Ready for use . header : bool or str True if the defined data range includes a header with field names . Else False - the whole range is data . If a string , it is spread sheet style notation of the startcell for the header ( " F9 " ) . The " width " of this record is the same as for the data . startcell : str or None If given , a spread sheet style notation of the cell where reading start , ( " F9 " ) . stopcell : str or None A spread sheet style notation of the cell where data end , ( " F9 " ) . startcell and stopcell can both be None , either one specified or both specified . Note to self : consider making possible to specify headers in a column ."""
datstart , datstop = _get_startstop ( sheet , startcell , stopcell ) headstart , headstop = StartStop ( 0 , 0 ) , StartStop ( 0 , 0 ) # Holders def typicalprep ( ) : headstart . row , headstart . col = datstart . row , datstart . col headstop . row , headstop . col = datstart . row + 1 , datstop . col # Tick the data start row by 1: datstart . row += 1 def offsetheaderprep ( ) : headstart . row , headstart . col = headrow , headcol headstop . row = headrow + 1 headstop . col = headcol + ( datstop . col - datstart . col ) # stop > start if header is True : # Simply the toprow of the table . typicalprep ( ) return [ headstart , headstop , datstart , datstop ] elif header : # Then it is a string if not False . ( " F9 " ) m = re . match ( XLNOT_RX , header ) headrow = int ( m . group ( 2 ) ) - 1 headcol = letter2num ( m . group ( 1 ) , zbase = True ) if headrow == datstart . row and headcol == datstart . col : typicalprep ( ) return [ headstart , headstop , datstart , datstop ] elif headrow == datstart . row : typicalprep ( ) offsetheaderprep ( ) return [ headstart , headstop , datstart , datstop ] else : offsetheaderprep ( ) return [ headstart , headstop , datstart , datstop ] else : # header is False return [ None , None , datstart , datstop ]
def remove_unnecessary_self_time_nodes ( frame , options ) : '''When a frame has only one child , and that is a self - time frame , remove that node , since it ' s unnecessary - it clutters the output and offers no additional information .'''
if frame is None : return None if len ( frame . children ) == 1 and isinstance ( frame . children [ 0 ] , SelfTimeFrame ) : child = frame . children [ 0 ] frame . self_time += child . self_time child . remove_from_parent ( ) for child in frame . children : remove_unnecessary_self_time_nodes ( child , options = options ) return frame
def read_one ( self , sequence ) : """Reads one item from the Ringbuffer . If the sequence is one beyond the current tail , this call blocks until an item is added . Currently it isn ' t possible to control how long this call is going to block . : param sequence : ( long ) , the sequence of the item to read . : return : ( object ) , the read item ."""
check_not_negative ( sequence , "sequence can't be smaller than 0" ) return self . _encode_invoke ( ringbuffer_read_one_codec , sequence = sequence )
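A hedged sketch of sequential reading with the Hazelcast Python client; obtaining the client and proxy, and blocking on .result(), follow the usual client API but are assumptions here.

ringbuffer = hz_client.get_ringbuffer("events")  # hz_client configured elsewhere
head = ringbuffer.head_sequence().result()
item = ringbuffer.read_one(head).result()        # blocks if head is one past the tail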
def wrap_deepmind ( env , dim = 84 , framestack = True ) : """Configure environment for DeepMind - style Atari . Note that we assume reward clipping is done outside the wrapper . Args : dim ( int ) : Dimension to resize observations to ( dim x dim ) . framestack ( bool ) : Whether to framestack observations ."""
env = MonitorEnv ( env ) env = NoopResetEnv ( env , noop_max = 30 ) if "NoFrameskip" in env . spec . id : env = MaxAndSkipEnv ( env , skip = 4 ) env = EpisodicLifeEnv ( env ) if "FIRE" in env . unwrapped . get_action_meanings ( ) : env = FireResetEnv ( env ) env = WarpFrame ( env , dim ) # env = ScaledFloatFrame ( env ) # TODO : use for dqn ? # env = ClipRewardEnv ( env ) # reward clipping is handled by policy eval if framestack : env = FrameStack ( env , 4 ) return env
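Typical wrapping, assuming the gym package and the Atari ROMs are installed:

import gym

env = wrap_deepmind(gym.make("PongNoFrameskip-v4"), dim=84, framestack=True)
obs = env.reset()  # an 84x84 observation stacked over the last 4 frames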
def extend ( cls , name ) : """Derive a new leancloud . Object subclass . : param name : name of the subclass : type name : string _ types : return : the derived subclass : rtype : ObjectMeta"""
if six . PY2 and isinstance ( name , six . text_type ) : # In python2 , class name must be a python2 str . name = name . encode ( 'utf-8' ) return type ( name , ( cls , ) , { } )
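A short usage sketch against the public leancloud SDK; the class name and field are made up.

Todo = leancloud.Object.extend('Todo')  # Todo is now a subclass of leancloud.Object
todo = Todo()
todo.set('title', 'read the docs')
todo.save()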
def signal_alias_exists ( alias : str ) -> bool : """Checks if signal alias exists . : param alias : Signal alias . : return :"""
if SignalDispatcher . signals . get ( alias ) : return True return False
def buy_close ( id_or_ins , amount , price = None , style = None , close_today = False ) : """Buy to close a short position . : param id _ or _ ins : the instrument to trade : type id _ or _ ins : : class : ` ~ Instrument ` object | ` str ` | List [ : class : ` ~ Instrument ` ] | List [ ` str ` ] : param int amount : number of lots to order : param float price : order price ; defaults to None , which means a : class : ` ~ MarketOrder ` . This parameter mainly exists to simplify the ` style ` parameter . : param style : order type ; defaults to a market order . The currently supported order types are : class : ` ~ LimitOrder ` and : class : ` ~ MarketOrder ` : type style : ` OrderStyle ` object : param bool close _ today : whether to send a close - today order ; defaults to False , which sends a regular close order : return : : class : ` ~ Order ` object | list [ : class : ` ~ Order ` ] | None : example : . . code - block : : python # buy to close 2 lots of the existing IF1603 short position with a market order : buy _ close ( ' IF1603 ' , 2)"""
position_effect = POSITION_EFFECT . CLOSE_TODAY if close_today else POSITION_EFFECT . CLOSE return order ( id_or_ins , amount , SIDE . BUY , position_effect , cal_style ( price , style ) )
def add_callback ( self , name , func ) : """Add a callback when device events happen . Args : name ( str ) : currently supports ' on _ scan ' , ' on _ report ' , ' on _ trace ' and ' on _ disconnect ' func ( callable ) : the function that should be called"""
if name == 'on_scan' : events = [ 'device_seen' ] def callback ( _conn_string , _conn_id , _name , event ) : func ( self . id , event , event . get ( 'validity_period' , 60 ) ) elif name == 'on_report' : events = [ 'report' , 'broadcast' ] def callback ( _conn_string , conn_id , _name , event ) : func ( conn_id , event ) elif name == 'on_trace' : events = [ 'trace' ] def callback ( _conn_string , conn_id , _name , event ) : func ( conn_id , event ) elif name == 'on_disconnect' : events = [ 'disconnection' ] def callback ( _conn_string , conn_id , _name , _event ) : func ( self . id , conn_id ) else : raise ArgumentError ( "Unknown callback type {}" . format ( name ) ) self . _adapter . register_monitor ( [ None ] , events , callback )
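A hedged registration example; device_adapter stands in for a concrete instance of this class, and the callback signature follows the 'on_scan' branch above.

def on_scan(adapter_id, event, validity_period):
    print("saw device", event, "valid for", validity_period, "seconds")

device_adapter.add_callback('on_scan', on_scan)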
def _set_import_ ( self , v , load = False ) : """Setter method for import _ , mapped from YANG variable / rbridge _ id / evpn _ instance / route _ target / import ( list ) If this variable is read - only ( config : false ) in the source YANG file , then _ set _ import _ is considered as a private method . Backends looking to populate this variable should do so via calling thisObj . _ set _ import _ ( ) directly ."""
if hasattr ( v , "_utype" ) : v = v . _utype ( v ) try : t = YANGDynClass ( v , base = YANGListType ( "target_community" , import_ . import_ , yang_name = "import" , rest_name = "import" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'target-community' , extensions = { u'tailf-common' : { u'info' : u'Configure Target VPN Extended Communities' , u'cli-suppress-mode' : None , u'callpoint' : u'EvpnRTConfigImport' , u'cli-full-no' : None } } ) , is_container = 'list' , yang_name = "import" , rest_name = "import" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Configure Target VPN Extended Communities' , u'cli-suppress-mode' : None , u'callpoint' : u'EvpnRTConfigImport' , u'cli-full-no' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-bgp' , defining_module = 'brocade-bgp' , yang_type = 'list' , is_config = True ) except ( TypeError , ValueError ) : raise ValueError ( { 'error-string' : """import_ must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("target_community",import_.import_, yang_name="import", rest_name="import", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'callpoint': u'EvpnRTConfigImport', u'cli-full-no': None}}), is_container='list', yang_name="import", rest_name="import", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'callpoint': u'EvpnRTConfigImport', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""" , } ) self . __import_ = t if hasattr ( self , '_set' ) : self . _set ( )
def asDatetime ( self , tzinfo = None ) : """Return this time as an aware datetime . datetime instance . The returned datetime object has the specified tzinfo , or a tzinfo describing UTC if the tzinfo parameter is None ."""
if tzinfo is None : tzinfo = FixedOffset ( 0 , 0 ) if not self . isTimezoneDependent ( ) : return self . _time . replace ( tzinfo = tzinfo ) else : return self . _time . replace ( tzinfo = FixedOffset ( 0 , 0 ) ) . astimezone ( tzinfo )
def toggle_pac ( self ) : """Enable and disable PAC options ."""
if Pac is not None : pac_on = self . pac [ 'pac_on' ] . get_value ( ) self . pac [ 'prep' ] . setEnabled ( pac_on ) self . pac [ 'box_metric' ] . setEnabled ( pac_on ) self . pac [ 'box_complex' ] . setEnabled ( pac_on ) self . pac [ 'box_surro' ] . setEnabled ( pac_on ) self . pac [ 'box_opts' ] . setEnabled ( pac_on ) if not pac_on : self . pac [ 'prep' ] . set_value ( False ) if Pac is not None and pac_on : pac = self . pac hilb_on = pac [ 'hilbert_on' ] . isChecked ( ) wav_on = pac [ 'wavelet_on' ] . isChecked ( ) for button in pac [ 'hilbert' ] . values ( ) : button [ 0 ] . setEnabled ( hilb_on ) if button [ 1 ] is not None : button [ 1 ] . setEnabled ( hilb_on ) pac [ 'wav_width' ] [ 0 ] . setEnabled ( wav_on ) pac [ 'wav_width' ] [ 1 ] . setEnabled ( wav_on ) if pac [ 'metric' ] . get_value ( ) in [ 'Kullback-Leibler Distance' , 'Heights ratio' ] : pac [ 'nbin' ] [ 0 ] . setEnabled ( True ) pac [ 'nbin' ] [ 1 ] . setEnabled ( True ) else : pac [ 'nbin' ] [ 0 ] . setEnabled ( False ) pac [ 'nbin' ] [ 1 ] . setEnabled ( False ) if pac [ 'metric' ] . get_value ( ) == 'ndPac' : for button in pac [ 'surro' ] . values ( ) : button [ 0 ] . setEnabled ( False ) if button [ 1 ] is not None : button [ 1 ] . setEnabled ( False ) pac [ 'surro' ] [ 'pval' ] [ 0 ] . setEnabled ( True ) ndpac_on = pac [ 'metric' ] . get_value ( ) == 'ndPac' surro_on = logical_and ( pac [ 'surro_method' ] . get_value ( ) != 'No surrogates' , not ndpac_on ) norm_on = pac [ 'surro_norm' ] . get_value ( ) != 'No normalization' blocks_on = 'across time' in pac [ 'surro_method' ] . get_value ( ) pac [ 'surro_method' ] . setEnabled ( not ndpac_on ) for button in pac [ 'surro' ] . values ( ) : button [ 0 ] . setEnabled ( surro_on and norm_on ) if button [ 1 ] is not None : button [ 1 ] . setEnabled ( surro_on and norm_on ) pac [ 'surro' ] [ 'nblocks' ] [ 0 ] . setEnabled ( blocks_on ) pac [ 'surro' ] [ 'nblocks' ] [ 1 ] . setEnabled ( blocks_on ) if ndpac_on : pac [ 'surro_method' ] . set_value ( 'No surrogates' ) pac [ 'surro' ] [ 'pval' ] [ 0 ] . setEnabled ( True )